++ echo 'Log: /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1993/e2e-tests/logs/expose-sharded.log' Log: /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1993/e2e-tests/logs/expose-sharded.log ++ '[' -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1993/e2e-tests/conf/cloud-secret.yml ']' ++ SKIP_BACKUPS_TO_AWS_GCP_AZURE= ++ oc get projects ++ kubectl get nodes ++ grep '^minikube' +++ kubectl version -o json +++ jq -r .serverVersion.gitVersion +++ grep '\-eks\-' grep: warning: stray \ before - Warning: version difference between client (1.34) and server (1.30) exceeds the supported minor version skew of +/-1 ++ '[' ']' ++ EKS=0 +++ kubectl version -o json +++ jq -r .serverVersion.gitVersion +++ grep gke Warning: version difference between client (1.34) and server (1.30) exceeds the supported minor version skew of +/-1 ++ '[' v1.30.14-gke.1325000 ']' ++ GKE=1 +++ kubectl version -o json +++ jq -r '.serverVersion.major + "." + .serverVersion.minor' +++ /usr/sbin/sed -r 's/[^0-9.]+//g' Warning: version difference between client (1.34) and server (1.30) exceeds the supported minor version skew of +/-1 ++ KUBE_VERSION=1.30 + main + create_infra expose-sharded-21694 + local ns=expose-sharded-21694 + [[ 1 == 1 ]] + delete_crd + desc 'get and delete old CRDs and RBAC' + set +o xtrace ----------------------------------------------------------------------------------- get and delete old CRDs and RBAC ----------------------------------------------------------------------------------- + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1993/deploy/crd.yaml --ignore-not-found --wait=false ++ mktemp + local LAST_OUT=/tmp/tmp.FZ1c1POOSj ++ mktemp + local LAST_ERR=/tmp/tmp.2hvsWN5jhO + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1993/deploy/crd.yaml --ignore-not-found --wait=false + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.FZ1c1POOSj customresourcedefinition.apiextensions.k8s.io "perconaservermongodbbackups.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbrestores.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbs.psmdb.percona.com" deleted + cat /tmp/tmp.2hvsWN5jhO + rm /tmp/tmp.FZ1c1POOSj /tmp/tmp.2hvsWN5jhO + return 0 ++ yq eval .metadata.name /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1993/deploy/crd.yaml ++ grep -v '\-\-\-' grep: warning: stray \ before - grep: warning: stray \ before - + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + kubectl get perconaservermongodbbackups.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbbackups.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbbackups" + kubectl patch perconaservermongodbbackups.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbbackups" + : + kubectl_bin wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.gTcWjAFuji ++ mktemp + local LAST_ERR=/tmp/tmp.QPosXRzFX7 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com + exit_status=0 + set -e + '[' 0 
'!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.gTcWjAFuji + cat /tmp/tmp.QPosXRzFX7 + rm /tmp/tmp.gTcWjAFuji /tmp/tmp.QPosXRzFX7 + return 0 + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + kubectl get perconaservermongodbrestores.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbrestores.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbrestores" + kubectl patch perconaservermongodbrestores.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbrestores" + : + kubectl_bin wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.X5pjcJiJFs ++ mktemp + local LAST_ERR=/tmp/tmp.kKsiswcQ7P + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.X5pjcJiJFs + cat /tmp/tmp.kKsiswcQ7P + rm /tmp/tmp.X5pjcJiJFs /tmp/tmp.kKsiswcQ7P + return 0 + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + kubectl get perconaservermongodbs.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbs.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbs" + kubectl patch perconaservermongodbs.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbs" + : + kubectl_bin wait --for=delete crd perconaservermongodbs.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.wOz1an4LSX ++ mktemp + local LAST_ERR=/tmp/tmp.6Q2OkfbHYC + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbs.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.wOz1an4LSX + cat /tmp/tmp.6Q2OkfbHYC + rm /tmp/tmp.wOz1an4LSX /tmp/tmp.6Q2OkfbHYC + return 0 + local rbac_yaml=rbac.yaml + '[' -n psmdb-operator ']' + rbac_yaml=cw-rbac.yaml + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1993/deploy/cw-rbac.yaml --ignore-not-found ++ mktemp + local LAST_OUT=/tmp/tmp.sewHC0Vfob ++ mktemp + local LAST_ERR=/tmp/tmp.ptGJR1XCaK + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1993/deploy/cw-rbac.yaml --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.sewHC0Vfob clusterrole.rbac.authorization.k8s.io "percona-server-mongodb-operator" deleted clusterrolebinding.rbac.authorization.k8s.io "service-account-percona-server-mongodb-operator" deleted + cat /tmp/tmp.ptGJR1XCaK + rm /tmp/tmp.sewHC0Vfob /tmp/tmp.ptGJR1XCaK + return 0 + check_crd_for_deletion PR-1993-afe14c70 + local git_tag=PR-1993-afe14c70 ++ curl -s https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/PR-1993-afe14c70/deploy/crd.yaml ++ yq eval .metadata.name ++ /usr/sbin/sed s/---//g ++ /usr/sbin/sed ':a;N;$!ba;s/\n/ /g' + for crd_name in $(curl -s 
https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/${git_tag}/deploy/crd.yaml | yq eval '.metadata.name' | $sed 's/---//g' | $sed ':a;N;$!ba;s/\n/ /g') ++ kubectl_bin get crd/null -o 'jsonpath={.status.conditions[-1].type}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.uuWXlDASlA +++ mktemp ++ local LAST_ERR=/tmp/tmp.B2gMcxS2OP ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}' ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.uuWXlDASlA ++ cat /tmp/tmp.B2gMcxS2OP Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ sleep 0 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}' ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.uuWXlDASlA ++ cat /tmp/tmp.B2gMcxS2OP Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ sleep 4 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}' ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.uuWXlDASlA ++ cat /tmp/tmp.B2gMcxS2OP Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ sleep 8 ++ cat /tmp/tmp.uuWXlDASlA ++ cat /tmp/tmp.B2gMcxS2OP Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ rm /tmp/tmp.uuWXlDASlA /tmp/tmp.B2gMcxS2OP ++ return 1 + [[ '' == \T\e\r\m\i\n\a\t\i\n\g ]] + '[' -n psmdb-operator ']' + create_namespace psmdb-operator + local namespace=psmdb-operator + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + destroy_chaos_mesh ++ helm list --all-namespaces --filter chaos-mesh ++ tail -n1 ++ awk '-F ' '{print $2}' ++ sed s/NAMESPACE// + local chaos_mesh_ns= + desc 'destroy chaos-mesh' + set +o xtrace ----------------------------------------------------------------------------------- destroy chaos-mesh ----------------------------------------------------------------------------------- + '[' -n '' ']' ++ kubectl get MutatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete MutatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep validate-auth ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl api-resources ++ grep chaos-mesh ++ awk '{print $1}' ++ kubectl get crd ++ grep chaos-mesh.org ++ awk '{print $1}' + timeout 30 kubectl delete crd error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrolebinding ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrolebinding error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrole ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrole error: resource(s) were provided, but no name was specified + : + desc 'cleaned up all old namespaces' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up all old namespaces 
----------------------------------------------------------------------------------- + kubectl_bin get ns + egrep -v '^kube-|^default|Terminating|psmdb-operator|openshift|^gke-|^gmp-|^NAME' + awk '{print$1}' + '[' -n '' ']' ++ mktemp + desc 'cleaned up old namespaces psmdb-operator' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up old namespaces psmdb-operator ----------------------------------------------------------------------------------- + kubectl_bin delete namespace psmdb-operator --ignore-not-found + xargs kubectl delete ns ++ mktemp egrep: warning: egrep is obsolescent; using grep -E + local LAST_OUT=/tmp/tmp.3oiNPGZNX1 + local LAST_OUT=/tmp/tmp.QrFEDdcoHX ++ mktemp ++ mktemp + local LAST_ERR=/tmp/tmp.JZmjM8iatr + local exit_status=0 + local timeout=4 + local LAST_ERR=/tmp/tmp.2xfJ0Fz5CY + local exit_status=0 + local timeout=4 ++ seq 0 2 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get ns + for i in $(seq 0 2) + set +e + kubectl delete namespace psmdb-operator --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.3oiNPGZNX1 + cat /tmp/tmp.JZmjM8iatr + rm /tmp/tmp.3oiNPGZNX1 /tmp/tmp.JZmjM8iatr + return 0 namespace "expose-sharded-10462" deleted + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.QrFEDdcoHX namespace "psmdb-operator" deleted + cat /tmp/tmp.2xfJ0Fz5CY + rm /tmp/tmp.QrFEDdcoHX /tmp/tmp.2xfJ0Fz5CY + return 0 + kubectl_bin wait --for=delete namespace psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.XyNFbBI3Fv ++ mktemp + local LAST_ERR=/tmp/tmp.d1dSgEHXzI + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete namespace psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.XyNFbBI3Fv + cat /tmp/tmp.d1dSgEHXzI + rm /tmp/tmp.XyNFbBI3Fv /tmp/tmp.d1dSgEHXzI + return 0 + desc 'create namespace psmdb-operator' + set +o xtrace ----------------------------------------------------------------------------------- create namespace psmdb-operator ----------------------------------------------------------------------------------- + kubectl_bin create namespace psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.Piop5VyYMS ++ mktemp + local LAST_ERR=/tmp/tmp.ZgidxxM9CX + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl create namespace psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Piop5VyYMS namespace/psmdb-operator created + cat /tmp/tmp.ZgidxxM9CX + rm /tmp/tmp.Piop5VyYMS /tmp/tmp.ZgidxxM9CX + return 0 + set_kube_ctx psmdb-operator + local namespace=psmdb-operator ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.ATL8hnA99r +++ mktemp ++ local LAST_ERR=/tmp/tmp.U3D1dVeYev ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.ATL8hnA99r ++ cat /tmp/tmp.U3D1dVeYev ++ rm /tmp/tmp.ATL8hnA99r /tmp/tmp.U3D1dVeYev ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-1993-afe14c70-12-cluster8 --namespace=psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.3lI0nqzdHl ++ mktemp + local LAST_ERR=/tmp/tmp.MrzyWI6Aoa + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl config set-context 
gke_cloud-dev-112233_us-central1-a_jen-psmdb-1993-afe14c70-12-cluster8 --namespace=psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.3lI0nqzdHl Context "gke_cloud-dev-112233_us-central1-a_jen-psmdb-1993-afe14c70-12-cluster8" modified. + cat /tmp/tmp.MrzyWI6Aoa + rm /tmp/tmp.3lI0nqzdHl /tmp/tmp.MrzyWI6Aoa + return 0 + deploy_operator + desc 'start PSMDB operator' + set +o xtrace ----------------------------------------------------------------------------------- start PSMDB operator ----------------------------------------------------------------------------------- + local cr_file + '[' -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1993/e2e-tests/expose-sharded/conf/crd.yaml ']' + cr_file=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1993/deploy/crd.yaml + kubectl_bin apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1993/deploy/crd.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.iXnKNkOMZ6 ++ mktemp + local LAST_ERR=/tmp/tmp.GJIy1NKrJJ + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1993/deploy/crd.yaml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.iXnKNkOMZ6 customresourcedefinition.apiextensions.k8s.io/perconaservermongodbbackups.psmdb.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaservermongodbrestores.psmdb.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaservermongodbs.psmdb.percona.com serverside-applied + cat /tmp/tmp.GJIy1NKrJJ + rm /tmp/tmp.iXnKNkOMZ6 /tmp/tmp.GJIy1NKrJJ + return 0 + '[' -n psmdb-operator ']' + apply_rbac cw-rbac + local operator_namespace=psmdb-operator + local rbac=cw-rbac + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1993/deploy/cw-rbac.yaml + sed -e 's^namespace: .*^namespace: psmdb-operator^' + kubectl_bin apply -n psmdb-operator -f - ++ mktemp + local LAST_OUT=/tmp/tmp.nLba2JBpt7 ++ mktemp + local LAST_ERR=/tmp/tmp.7mhGE0rRoh + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -n psmdb-operator -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.nLba2JBpt7 clusterrole.rbac.authorization.k8s.io/percona-server-mongodb-operator created serviceaccount/percona-server-mongodb-operator created clusterrolebinding.rbac.authorization.k8s.io/service-account-percona-server-mongodb-operator created + cat /tmp/tmp.7mhGE0rRoh + rm /tmp/tmp.nLba2JBpt7 /tmp/tmp.7mhGE0rRoh + return 0 + yq eval ' (.spec.template.spec.containers[].image = "perconalab/percona-server-mongodb-operator:PR-1993-afe14c70") | ((.. | select(.[] == "DISABLE_TELEMETRY")) |= .value="true") | ((.. 
| select(.[] == "LOG_LEVEL")) |= .value="DEBUG")' /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1993/deploy/cw-operator.yaml + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.9v5nDRk6qW ++ mktemp + local LAST_ERR=/tmp/tmp.wYkgbzqexo + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.9v5nDRk6qW deployment.apps/percona-server-mongodb-operator created + cat /tmp/tmp.wYkgbzqexo + rm /tmp/tmp.9v5nDRk6qW /tmp/tmp.wYkgbzqexo + return 0 + sleep 2 ++ get_operator_pod ++ kubectl_bin get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.2IwA4up8J1 +++ mktemp ++ local LAST_ERR=/tmp/tmp.oPfAWnauBi ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.2IwA4up8J1 ++ cat /tmp/tmp.oPfAWnauBi ++ rm /tmp/tmp.2IwA4up8J1 /tmp/tmp.oPfAWnauBi ++ return 0 + wait_pod percona-server-mongodb-operator-b7c64c587-xz5tx + local pod=percona-server-mongodb-operator-b7c64c587-xz5tx + set +o xtrace waiting for pod/percona-server-mongodb-operator-b7c64c587-xz5tx to be ready.OK + echo 'Print operator info from log' Print operator info from log + grep 'Manager starting up' ++ get_operator_pod ++ kubectl_bin get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.ejrdjw4wZb +++ mktemp ++ local LAST_ERR=/tmp/tmp.354kgEGSVR ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.ejrdjw4wZb ++ cat /tmp/tmp.354kgEGSVR ++ rm /tmp/tmp.ejrdjw4wZb /tmp/tmp.354kgEGSVR ++ return 0 + kubectl_bin logs percona-server-mongodb-operator-b7c64c587-xz5tx ++ mktemp + local LAST_OUT=/tmp/tmp.nX8RH7Uvos ++ mktemp + local LAST_ERR=/tmp/tmp.kOKCvUSfP4 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl logs percona-server-mongodb-operator-b7c64c587-xz5tx + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.nX8RH7Uvos + cat /tmp/tmp.kOKCvUSfP4 + rm /tmp/tmp.nX8RH7Uvos /tmp/tmp.kOKCvUSfP4 + return 0 2025-10-06T20:47:44.009Z INFO setup Manager starting up {"gitCommit": "afe14c7037e555af435bdbf8ce6cdb2ddbfc4d36", "gitBranch": "PR-1993-afe14c70", "buildTime": "", "goVersion": "go1.25.1", "os": "linux", "arch": "amd64"} + create_namespace expose-sharded-21694 + local namespace=expose-sharded-21694 + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + destroy_chaos_mesh ++ helm list --all-namespaces --filter chaos-mesh ++ tail -n1 ++ awk '-F ' '{print $2}' ++ sed s/NAMESPACE// + local chaos_mesh_ns= + desc 'destroy chaos-mesh' + set +o xtrace ----------------------------------------------------------------------------------- destroy chaos-mesh ----------------------------------------------------------------------------------- + '[' -n '' ']' ++ kubectl get MutatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete MutatingWebhookConfiguration error: resource(s) 
were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep validate-auth ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl api-resources ++ awk '{print $1}' ++ grep chaos-mesh ++ kubectl get crd ++ grep chaos-mesh.org ++ awk '{print $1}' + timeout 30 kubectl delete crd error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrolebinding ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrolebinding error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrole ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrole error: resource(s) were provided, but no name was specified + : + desc 'cleaned up all old namespaces' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up all old namespaces ----------------------------------------------------------------------------------- + kubectl_bin get ns + egrep -v '^kube-|^default|Terminating|psmdb-operator|openshift|^gke-|^gmp-|^NAME' + awk '{print$1}' + '[' -n '' ']' ++ mktemp + desc 'cleaned up old namespaces expose-sharded-21694' + xargs kubectl delete ns + set +o xtrace ----------------------------------------------------------------------------------- cleaned up old namespaces expose-sharded-21694 ----------------------------------------------------------------------------------- + kubectl_bin delete namespace expose-sharded-21694 --ignore-not-found ++ mktemp + local LAST_OUT=/tmp/tmp.T9ibcKRqUK ++ mktemp egrep: warning: egrep is obsolescent; using grep -E + local LAST_OUT=/tmp/tmp.nsRcJr7ua6 + local LAST_ERR=/tmp/tmp.2AZBMeCGBP + local exit_status=0 + local timeout=4 ++ mktemp ++ seq 0 2 + local LAST_ERR=/tmp/tmp.uMXu7B9zTc + local exit_status=0 + local timeout=4 + for i in $(seq 0 2) + set +e + kubectl get ns ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete namespace expose-sharded-21694 --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.T9ibcKRqUK + cat /tmp/tmp.2AZBMeCGBP + rm /tmp/tmp.T9ibcKRqUK /tmp/tmp.2AZBMeCGBP + return 0 error: resource(s) were provided, but no name was specified + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.nsRcJr7ua6 + cat /tmp/tmp.uMXu7B9zTc + rm /tmp/tmp.nsRcJr7ua6 /tmp/tmp.uMXu7B9zTc + return 0 + kubectl_bin wait --for=delete namespace expose-sharded-21694 ++ mktemp + local LAST_OUT=/tmp/tmp.IxIoLqB1Fh ++ mktemp + local LAST_ERR=/tmp/tmp.WZAYVMrvPA + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete namespace expose-sharded-21694 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.IxIoLqB1Fh + cat /tmp/tmp.WZAYVMrvPA + rm /tmp/tmp.IxIoLqB1Fh /tmp/tmp.WZAYVMrvPA + return 0 + desc 'create namespace expose-sharded-21694' + set +o xtrace ----------------------------------------------------------------------------------- create namespace expose-sharded-21694 ----------------------------------------------------------------------------------- + kubectl_bin create namespace expose-sharded-21694 ++ mktemp + local LAST_OUT=/tmp/tmp.b1QjtrWcLk ++ mktemp + local 
LAST_ERR=/tmp/tmp.gWpoxcjXmu + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl create namespace expose-sharded-21694 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.b1QjtrWcLk namespace/expose-sharded-21694 created + cat /tmp/tmp.gWpoxcjXmu + rm /tmp/tmp.b1QjtrWcLk /tmp/tmp.gWpoxcjXmu + return 0 + set_kube_ctx expose-sharded-21694 + local namespace=expose-sharded-21694 ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.YJupVeLMkx +++ mktemp ++ local LAST_ERR=/tmp/tmp.YxY3f6WV3n ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.YJupVeLMkx ++ cat /tmp/tmp.YxY3f6WV3n ++ rm /tmp/tmp.YJupVeLMkx /tmp/tmp.YxY3f6WV3n ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-1993-afe14c70-12-cluster8 --namespace=expose-sharded-21694 ++ mktemp + local LAST_OUT=/tmp/tmp.C39Y7fICXf ++ mktemp + local LAST_ERR=/tmp/tmp.esDtycgBVx + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-1993-afe14c70-12-cluster8 --namespace=expose-sharded-21694 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.C39Y7fICXf Context "gke_cloud-dev-112233_us-central1-a_jen-psmdb-1993-afe14c70-12-cluster8" modified. + cat /tmp/tmp.esDtycgBVx + rm /tmp/tmp.C39Y7fICXf /tmp/tmp.esDtycgBVx + return 0 + desc 'create first PSMDB cluster' + set +o xtrace ----------------------------------------------------------------------------------- create first PSMDB cluster ----------------------------------------------------------------------------------- + cluster=some-name + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1993/e2e-tests/conf/secrets.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1993/e2e-tests/conf/client.yml ++ mktemp + local LAST_OUT=/tmp/tmp.gkFtemHo8s ++ mktemp + local LAST_ERR=/tmp/tmp.k51qyLbvU9 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1993/e2e-tests/conf/secrets.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1993/e2e-tests/conf/client.yml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.gkFtemHo8s secret/some-users created deployment.apps/psmdb-client created + cat /tmp/tmp.k51qyLbvU9 + rm /tmp/tmp.gkFtemHo8s /tmp/tmp.k51qyLbvU9 + return 0 + apply_s3_storage_secrets + desc 'create secrets for cloud storages' + set +o xtrace ----------------------------------------------------------------------------------- create secrets for cloud storages ----------------------------------------------------------------------------------- + '[' -z '' ']' + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1993/e2e-tests/conf/minio-secret.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1993/e2e-tests/conf/cloud-secret.yml ++ mktemp + local LAST_OUT=/tmp/tmp.jJLvTnrUV9 ++ mktemp + local LAST_ERR=/tmp/tmp.4pDYTTCP3G + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1993/e2e-tests/conf/minio-secret.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1993/e2e-tests/conf/cloud-secret.yml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat 
/tmp/tmp.jJLvTnrUV9 secret/minio-secret created secret/aws-s3-secret created secret/gcp-cs-secret created secret/azure-secret created + cat /tmp/tmp.4pDYTTCP3G + rm /tmp/tmp.jJLvTnrUV9 /tmp/tmp.4pDYTTCP3G + return 0 + version_gt 1.19 ++ echo '1.30 >= 1.19' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + '[' 0 -ne 1 ']' + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1993/e2e-tests/conf/container-rc.yaml + kubectl_bin apply -f - + /usr/sbin/sed s/docker/runc/g ++ mktemp + local LAST_OUT=/tmp/tmp.quFPZA4H59 ++ mktemp + local LAST_ERR=/tmp/tmp.xnEktEKGrn + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.quFPZA4H59 runtimeclass.node.k8s.io/container-rc unchanged + cat /tmp/tmp.xnEktEKGrn + rm /tmp/tmp.quFPZA4H59 /tmp/tmp.xnEktEKGrn + return 0 + apply_cluster /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1993/e2e-tests/expose-sharded/conf/some-name-rs0.yml + '[' -z '' ']' + cat_config /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1993/e2e-tests/expose-sharded/conf/some-name-rs0.yml + kubectl_bin apply -f - + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1993/e2e-tests/expose-sharded/conf/some-name-rs0.yml + yq eval '(.spec | select(.image == null)).image = "perconalab/percona-server-mongodb-operator:main-mongod7.0"' ++ mktemp + yq eval '(.spec | select(has("pmm"))).pmm.image = "percona/pmm-client:2.44.1-1"' + yq eval '(.spec | select(has("initImage"))).initImage = "perconalab/percona-server-mongodb-operator:PR-1993-afe14c70"' + yq eval '(.spec | select(has("backup"))).backup.image = "perconalab/percona-server-mongodb-operator:main-backup"' + yq eval '.spec.upgradeOptions.apply="Never"' + local LAST_OUT=/tmp/tmp.kzPsXcjYIk ++ mktemp + local LAST_ERR=/tmp/tmp.U74RohEe5N + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.kzPsXcjYIk perconaservermongodb.psmdb.percona.com/some-name created + cat /tmp/tmp.U74RohEe5N + rm /tmp/tmp.kzPsXcjYIk /tmp/tmp.U74RohEe5N + return 0 + desc 'check if all 3 Pods started' + set +o xtrace ----------------------------------------------------------------------------------- check if all 3 Pods started ----------------------------------------------------------------------------------- + wait_for_running some-name-rs0 3 + local name=some-name-rs0 + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=some-name ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod some-name-rs0-0 + local pod=some-name-rs0-0 + set +o xtrace waiting for pod/some-name-rs0-0 to be ready......OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod some-name-rs0-1 + local pod=some-name-rs0-1 + set +o xtrace waiting for pod/some-name-rs0-1 to be ready......OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.X3gHMG0HDe +++ mktemp ++ local LAST_ERR=/tmp/tmp.Sj7t1Ffz2W ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.X3gHMG0HDe ++ cat /tmp/tmp.Sj7t1Ffz2W ++ rm /tmp/tmp.X3gHMG0HDe /tmp/tmp.Sj7t1Ffz2W ++ 
return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-rs0-2 + local pod=some-name-rs0-2 + set +o xtrace waiting for pod/some-name-rs0-2 to be ready.....OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.b4N9S71PfY +++ mktemp ++ local LAST_ERR=/tmp/tmp.FYoVH48QE6 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.b4N9S71PfY ++ cat /tmp/tmp.FYoVH48QE6 ++ rm /tmp/tmp.b4N9S71PfY /tmp/tmp.FYoVH48QE6 ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.HyAoSmZ2Gb +++ mktemp ++ local LAST_ERR=/tmp/tmp.ld5ENusEr9 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.HyAoSmZ2Gb ++ cat /tmp/tmp.ld5ENusEr9 ++ rm /tmp/tmp.HyAoSmZ2Gb /tmp/tmp.ld5ENusEr9 ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness........................ + wait_for_running some-name-cfg 3 false + local name=some-name-cfg + let last_pod=2 + local check_cluster_readyness=false + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=cfg + local cluster_name=some-name ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod some-name-cfg-0 + local pod=some-name-cfg-0 + set +o xtrace waiting for pod/some-name-cfg-0 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod some-name-cfg-1 + local pod=some-name-cfg-1 + set +o xtrace waiting for pod/some-name-cfg-1 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.qvQbaEriG0 +++ mktemp ++ local LAST_ERR=/tmp/tmp.aYAudyHbi5 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.qvQbaEriG0 ++ cat /tmp/tmp.aYAudyHbi5 ++ rm /tmp/tmp.qvQbaEriG0 /tmp/tmp.aYAudyHbi5 ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-cfg-2 + local pod=some-name-cfg-2 + set +o xtrace waiting for pod/some-name-cfg-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.t6KRauBAHF +++ mktemp ++ local LAST_ERR=/tmp/tmp.cHt74kXKD6 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.t6KRauBAHF ++ cat /tmp/tmp.cHt74kXKD6 ++ rm /tmp/tmp.t6KRauBAHF /tmp/tmp.cHt74kXKD6 ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.pF6T5VGrEq +++ mktemp ++ local LAST_ERR=/tmp/tmp.zbuM6ELjSG ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ 
for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.pF6T5VGrEq ++ cat /tmp/tmp.zbuM6ELjSG ++ rm /tmp/tmp.pF6T5VGrEq /tmp/tmp.zbuM6ELjSG ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ false == \t\r\u\e ]] + wait_for_running some-name-mongos 3 + local name=some-name-mongos + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=mongos + local cluster_name=some-name ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod some-name-mongos-0 + local pod=some-name-mongos-0 + set +o xtrace waiting for pod/some-name-mongos-0 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod some-name-mongos-1 + local pod=some-name-mongos-1 + set +o xtrace waiting for pod/some-name-mongos-1 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.lqS1zZzt3q +++ mktemp ++ local LAST_ERR=/tmp/tmp.dEVurmrRHG ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.lqS1zZzt3q ++ cat /tmp/tmp.dEVurmrRHG ++ rm /tmp/tmp.lqS1zZzt3q /tmp/tmp.dEVurmrRHG ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-mongos-2 + local pod=some-name-mongos-2 + set +o xtrace waiting for pod/some-name-mongos-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.QhkVdtGQd9 +++ mktemp ++ local LAST_ERR=/tmp/tmp.4PvUwT8gtc ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.QhkVdtGQd9 ++ cat /tmp/tmp.4PvUwT8gtc ++ rm /tmp/tmp.QhkVdtGQd9 /tmp/tmp.4PvUwT8gtc ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.3Xujk2hMyq +++ mktemp ++ local LAST_ERR=/tmp/tmp.2YIFSLus9Y ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.3Xujk2hMyq ++ cat /tmp/tmp.2YIFSLus9Y ++ rm /tmp/tmp.3Xujk2hMyq /tmp/tmp.2YIFSLus9Y ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness + wait_cluster_consistency some-name + local cluster_name=some-name + local wait_time=32 + retry=0 + sleep 7 + echo -n 'waiting for cluster readyness' waiting for cluster readyness++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.eyOi8HEDPG +++ mktemp ++ local LAST_ERR=/tmp/tmp.z59autyyQR ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.eyOi8HEDPG ++ cat /tmp/tmp.z59autyyQR 
++ rm /tmp/tmp.eyOi8HEDPG /tmp/tmp.z59autyyQR ++ return 0 + [[ ready == \r\e\a\d\y ]] + echo .OK .OK + desc 'check if service and statefulset created with expected config' + set +o xtrace ----------------------------------------------------------------------------------- check if service and statefulset created with expected config ----------------------------------------------------------------------------------- + compare_kubectl statefulset/some-name-rs0 + local resource=statefulset/some-name-rs0 + local postfix= + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1993/e2e-tests/expose-sharded/compare/statefulset_some-name-rs0.yml + local new_result=/tmp/tmp.wBdHIypxeu/statefulset_some-name-rs0.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1993/e2e-tests/expose-sharded/compare/statefulset_some-name-rs0-oc.yml ']' + kubectl_bin get -o yaml statefulset/some-name-rs0 + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("expose-sharded-21694", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. 
== "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.480T5EaHn6 ++ mktemp + local LAST_ERR=/tmp/tmp.8Q7ckpVEQ2 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get -o yaml statefulset/some-name-rs0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.480T5EaHn6 + cat /tmp/tmp.8Q7ckpVEQ2 + rm /tmp/tmp.480T5EaHn6 /tmp/tmp.8Q7ckpVEQ2 + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.wBdHIypxeu/statefulset_some-name-rs0.yml + version_gt 1.22 ++ echo '1.30 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.wBdHIypxeu/statefulset_some-name-rs0.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.wBdHIypxeu/statefulset_some-name-rs0.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1993/e2e-tests/expose-sharded/compare/statefulset_some-name-rs0.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1993/e2e-tests/expose-sharded/compare/statefulset_some-name-rs0.yml /tmp/tmp.wBdHIypxeu/statefulset_some-name-rs0.yml + log 'compare_kubectl: statefulset/some-name-rs0 OK' + set +o xtrace [2025-10-06T20:51:06+0000] compare_kubectl: statefulset/some-name-rs0 OK + compare_kubectl statefulset/some-name-cfg + local resource=statefulset/some-name-cfg + local postfix= + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1993/e2e-tests/expose-sharded/compare/statefulset_some-name-cfg.yml + local new_result=/tmp/tmp.wBdHIypxeu/statefulset_some-name-cfg.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1993/e2e-tests/expose-sharded/compare/statefulset_some-name-cfg-oc.yml ']' + kubectl_bin get -o yaml statefulset/some-name-cfg + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. 
| select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("expose-sharded-21694", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. == "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.TuBQMhiM5g ++ mktemp + local LAST_ERR=/tmp/tmp.bBGEuBUzZn + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get -o yaml statefulset/some-name-cfg + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.TuBQMhiM5g + cat /tmp/tmp.bBGEuBUzZn + rm /tmp/tmp.TuBQMhiM5g /tmp/tmp.bBGEuBUzZn + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.wBdHIypxeu/statefulset_some-name-cfg.yml + version_gt 1.22 ++ echo '1.30 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.wBdHIypxeu/statefulset_some-name-cfg.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.wBdHIypxeu/statefulset_some-name-cfg.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1993/e2e-tests/expose-sharded/compare/statefulset_some-name-cfg.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1993/e2e-tests/expose-sharded/compare/statefulset_some-name-cfg.yml /tmp/tmp.wBdHIypxeu/statefulset_some-name-cfg.yml + log 'compare_kubectl: statefulset/some-name-cfg OK' + set +o xtrace [2025-10-06T20:51:07+0000] compare_kubectl: statefulset/some-name-cfg OK + compare_kubectl statefulset/some-name-mongos '' + local resource=statefulset/some-name-mongos + local postfix= + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1993/e2e-tests/expose-sharded/compare/statefulset_some-name-mongos.yml + local new_result=/tmp/tmp.wBdHIypxeu/statefulset_some-name-mongos.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1993/e2e-tests/expose-sharded/compare/statefulset_some-name-mongos-oc.yml ']' + kubectl_bin get -o yaml statefulset/some-name-mongos + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. 
| select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("expose-sharded-21694", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. == "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.BVS33vGuST ++ mktemp + local LAST_ERR=/tmp/tmp.y06hG85VKQ + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get -o yaml statefulset/some-name-mongos + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.BVS33vGuST + cat /tmp/tmp.y06hG85VKQ + rm /tmp/tmp.BVS33vGuST /tmp/tmp.y06hG85VKQ + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.wBdHIypxeu/statefulset_some-name-mongos.yml + version_gt 1.22 ++ echo '1.30 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.wBdHIypxeu/statefulset_some-name-mongos.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.wBdHIypxeu/statefulset_some-name-mongos.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1993/e2e-tests/expose-sharded/compare/statefulset_some-name-mongos.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1993/e2e-tests/expose-sharded/compare/statefulset_some-name-mongos.yml /tmp/tmp.wBdHIypxeu/statefulset_some-name-mongos.yml + log 'compare_kubectl: statefulset/some-name-mongos OK' + set +o xtrace [2025-10-06T20:51:08+0000] compare_kubectl: statefulset/some-name-mongos OK + desc 'disabling sharding' + set +o xtrace ----------------------------------------------------------------------------------- disabling sharding ----------------------------------------------------------------------------------- + kubectl_bin patch psmdb some-name --type=json '-p=[{"op": "replace", "path": "/spec/sharding/enabled", "value": false}]' ++ mktemp + local LAST_OUT=/tmp/tmp.3UJGCfJ5rv ++ mktemp + local LAST_ERR=/tmp/tmp.gqGckV8HrS + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl patch psmdb some-name --type=json '-p=[{"op": "replace", "path": "/spec/sharding/enabled", "value": false}]' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.3UJGCfJ5rv perconaservermongodb.psmdb.percona.com/some-name patched + cat /tmp/tmp.gqGckV8HrS + rm /tmp/tmp.3UJGCfJ5rv /tmp/tmp.gqGckV8HrS + return 0 + sleep 10 + wait_cluster_consistency some-name 60 + local cluster_name=some-name + local wait_time=60 + retry=0 + sleep 7 + echo -n 'waiting for cluster readyness' waiting for cluster readyness++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local 
LAST_OUT=/tmp/tmp.5NIDDH3t3Q +++ mktemp ++ local LAST_ERR=/tmp/tmp.RqdXGvogXb ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.5NIDDH3t3Q ++ cat /tmp/tmp.RqdXGvogXb ++ rm /tmp/tmp.5NIDDH3t3Q /tmp/tmp.RqdXGvogXb ++ return 0 + [[ stopping == \r\e\a\d\y ]] + let retry+=1 + '[' 1 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.jfmSku68Pn +++ mktemp ++ local LAST_ERR=/tmp/tmp.csaFqZdWwB ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.jfmSku68Pn ++ cat /tmp/tmp.csaFqZdWwB ++ rm /tmp/tmp.jfmSku68Pn /tmp/tmp.csaFqZdWwB ++ return 0 + [[ stopping == \r\e\a\d\y ]] + let retry+=1 + '[' 2 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.EgzxO3Kne8 +++ mktemp ++ local LAST_ERR=/tmp/tmp.zvhUOGyFPW ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.EgzxO3Kne8 ++ cat /tmp/tmp.zvhUOGyFPW ++ rm /tmp/tmp.EgzxO3Kne8 /tmp/tmp.zvhUOGyFPW ++ return 0 + [[ stopping == \r\e\a\d\y ]] + let retry+=1 + '[' 3 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.UoOnI7Ddnn +++ mktemp ++ local LAST_ERR=/tmp/tmp.zojf3myZhK ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.UoOnI7Ddnn ++ cat /tmp/tmp.zojf3myZhK ++ rm /tmp/tmp.UoOnI7Ddnn /tmp/tmp.zojf3myZhK ++ return 0 + [[ stopping == \r\e\a\d\y ]] + let retry+=1 + '[' 4 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.nyTKwNJSov +++ mktemp ++ local LAST_ERR=/tmp/tmp.7AGyChSb1s ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.nyTKwNJSov ++ cat /tmp/tmp.7AGyChSb1s ++ rm /tmp/tmp.nyTKwNJSov /tmp/tmp.7AGyChSb1s ++ return 0 + [[ stopping == \r\e\a\d\y ]] + let retry+=1 + '[' 5 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ADr6Dk3p03 +++ mktemp ++ local LAST_ERR=/tmp/tmp.YbrcHBuphd ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.ADr6Dk3p03 ++ cat /tmp/tmp.YbrcHBuphd ++ rm /tmp/tmp.ADr6Dk3p03 /tmp/tmp.YbrcHBuphd ++ return 0 + [[ stopping == \r\e\a\d\y ]] + let retry+=1 + '[' 6 -ge 60 ']' + echo -n . 
.+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.rcfYDRqSDj +++ mktemp ++ local LAST_ERR=/tmp/tmp.MIm8m91qsB ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.rcfYDRqSDj ++ cat /tmp/tmp.MIm8m91qsB ++ rm /tmp/tmp.rcfYDRqSDj /tmp/tmp.MIm8m91qsB ++ return 0 + [[ error == \r\e\a\d\y ]] + let retry+=1 + '[' 7 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ZJTReIuHc2 +++ mktemp ++ local LAST_ERR=/tmp/tmp.8Hgy8x7mnK ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.ZJTReIuHc2 ++ cat /tmp/tmp.8Hgy8x7mnK ++ rm /tmp/tmp.ZJTReIuHc2 /tmp/tmp.8Hgy8x7mnK ++ return 0 + [[ error == \r\e\a\d\y ]] + let retry+=1 + '[' 8 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.DrMmhp71Vz +++ mktemp ++ local LAST_ERR=/tmp/tmp.LXblO3SnpB ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.DrMmhp71Vz ++ cat /tmp/tmp.LXblO3SnpB ++ rm /tmp/tmp.DrMmhp71Vz /tmp/tmp.LXblO3SnpB ++ return 0 + [[ error == \r\e\a\d\y ]] + let retry+=1 + '[' 9 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.GCEH4HJPqG +++ mktemp ++ local LAST_ERR=/tmp/tmp.2dmV4tJ8hn ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.GCEH4HJPqG ++ cat /tmp/tmp.2dmV4tJ8hn ++ rm /tmp/tmp.GCEH4HJPqG /tmp/tmp.2dmV4tJ8hn ++ return 0 + [[ error == \r\e\a\d\y ]] + let retry+=1 + '[' 10 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.67ikmczYg9 +++ mktemp ++ local LAST_ERR=/tmp/tmp.9AhGhCqEEw ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.67ikmczYg9 ++ cat /tmp/tmp.9AhGhCqEEw ++ rm /tmp/tmp.67ikmczYg9 /tmp/tmp.9AhGhCqEEw ++ return 0 + [[ error == \r\e\a\d\y ]] + let retry+=1 + '[' 11 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.DPb9E3gLlk +++ mktemp ++ local LAST_ERR=/tmp/tmp.0ORKWYWE6U ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.DPb9E3gLlk ++ cat /tmp/tmp.0ORKWYWE6U ++ rm /tmp/tmp.DPb9E3gLlk /tmp/tmp.0ORKWYWE6U ++ return 0 + [[ paused == \r\e\a\d\y ]] + let retry+=1 + '[' 12 -ge 60 ']' + echo -n . 
.+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.KoEsoXmnRy +++ mktemp ++ local LAST_ERR=/tmp/tmp.36x8bqzgzy ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.KoEsoXmnRy ++ cat /tmp/tmp.36x8bqzgzy ++ rm /tmp/tmp.KoEsoXmnRy /tmp/tmp.36x8bqzgzy ++ return 0 + [[ paused == \r\e\a\d\y ]] + let retry+=1 + '[' 13 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.RDtJHHTdvh +++ mktemp ++ local LAST_ERR=/tmp/tmp.BmGCBwYXcX ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.RDtJHHTdvh ++ cat /tmp/tmp.BmGCBwYXcX ++ rm /tmp/tmp.RDtJHHTdvh /tmp/tmp.BmGCBwYXcX ++ return 0 + [[ paused == \r\e\a\d\y ]] + let retry+=1 + '[' 14 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.XFxhABpH24 +++ mktemp ++ local LAST_ERR=/tmp/tmp.HEalH3pSfL ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.XFxhABpH24 ++ cat /tmp/tmp.HEalH3pSfL ++ rm /tmp/tmp.XFxhABpH24 /tmp/tmp.HEalH3pSfL ++ return 0 + [[ paused == \r\e\a\d\y ]] + let retry+=1 + '[' 15 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.n7unCaHJCX +++ mktemp ++ local LAST_ERR=/tmp/tmp.vzVUF0nfPN ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.n7unCaHJCX ++ cat /tmp/tmp.vzVUF0nfPN ++ rm /tmp/tmp.n7unCaHJCX /tmp/tmp.vzVUF0nfPN ++ return 0 + [[ paused == \r\e\a\d\y ]] + let retry+=1 + '[' 16 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.G3Gn626xh2 +++ mktemp ++ local LAST_ERR=/tmp/tmp.E2XJPVyxr5 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.G3Gn626xh2 ++ cat /tmp/tmp.E2XJPVyxr5 ++ rm /tmp/tmp.G3Gn626xh2 /tmp/tmp.E2XJPVyxr5 ++ return 0 + [[ paused == \r\e\a\d\y ]] + let retry+=1 + '[' 17 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.rJwM5Hx5SF +++ mktemp ++ local LAST_ERR=/tmp/tmp.Y4JaEYEutT ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.rJwM5Hx5SF ++ cat /tmp/tmp.Y4JaEYEutT ++ rm /tmp/tmp.rJwM5Hx5SF /tmp/tmp.Y4JaEYEutT ++ return 0 + [[ paused == \r\e\a\d\y ]] + let retry+=1 + '[' 18 -ge 60 ']' + echo -n . 
.+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.8sf6nfBxtZ +++ mktemp ++ local LAST_ERR=/tmp/tmp.yYux1BWhaP ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.8sf6nfBxtZ ++ cat /tmp/tmp.yYux1BWhaP ++ rm /tmp/tmp.8sf6nfBxtZ /tmp/tmp.yYux1BWhaP ++ return 0 + [[ paused == \r\e\a\d\y ]] + let retry+=1 + '[' 19 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ItcgrkoBa8 +++ mktemp ++ local LAST_ERR=/tmp/tmp.HWTMARckkr ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.ItcgrkoBa8 ++ cat /tmp/tmp.HWTMARckkr ++ rm /tmp/tmp.ItcgrkoBa8 /tmp/tmp.HWTMARckkr ++ return 0 + [[ paused == \r\e\a\d\y ]] + let retry+=1 + '[' 20 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.nMHfch4YqW +++ mktemp ++ local LAST_ERR=/tmp/tmp.b9J2N36iEQ ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.nMHfch4YqW ++ cat /tmp/tmp.b9J2N36iEQ ++ rm /tmp/tmp.nMHfch4YqW /tmp/tmp.b9J2N36iEQ ++ return 0 + [[ paused == \r\e\a\d\y ]] + let retry+=1 + '[' 21 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.CZslWgWHyU +++ mktemp ++ local LAST_ERR=/tmp/tmp.qXq5bPnT7w ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.CZslWgWHyU ++ cat /tmp/tmp.qXq5bPnT7w ++ rm /tmp/tmp.CZslWgWHyU /tmp/tmp.qXq5bPnT7w ++ return 0 + [[ paused == \r\e\a\d\y ]] + let retry+=1 + '[' 22 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.cLae1WU86y +++ mktemp ++ local LAST_ERR=/tmp/tmp.dbxY2Or4Io ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.cLae1WU86y ++ cat /tmp/tmp.dbxY2Or4Io ++ rm /tmp/tmp.cLae1WU86y /tmp/tmp.dbxY2Or4Io ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 23 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.wxdBiFiS6w +++ mktemp ++ local LAST_ERR=/tmp/tmp.snDknWiMXx ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.wxdBiFiS6w ++ cat /tmp/tmp.snDknWiMXx ++ rm /tmp/tmp.wxdBiFiS6w /tmp/tmp.snDknWiMXx ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 24 -ge 60 ']' + echo -n . 
.+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.cqyhusfjQs +++ mktemp ++ local LAST_ERR=/tmp/tmp.f9HbULtvPf ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.cqyhusfjQs ++ cat /tmp/tmp.f9HbULtvPf ++ rm /tmp/tmp.cqyhusfjQs /tmp/tmp.f9HbULtvPf ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 25 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.rE3s1Wabc8 +++ mktemp ++ local LAST_ERR=/tmp/tmp.SiZHkJr243 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.rE3s1Wabc8 ++ cat /tmp/tmp.SiZHkJr243 ++ rm /tmp/tmp.rE3s1Wabc8 /tmp/tmp.SiZHkJr243 ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 26 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.hMkXTqBCWM +++ mktemp ++ local LAST_ERR=/tmp/tmp.4xzW6JvGH2 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.hMkXTqBCWM ++ cat /tmp/tmp.4xzW6JvGH2 ++ rm /tmp/tmp.hMkXTqBCWM /tmp/tmp.4xzW6JvGH2 ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 27 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.WiWfyLV6S2 +++ mktemp ++ local LAST_ERR=/tmp/tmp.x3xwO5xXfw ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.WiWfyLV6S2 ++ cat /tmp/tmp.x3xwO5xXfw ++ rm /tmp/tmp.WiWfyLV6S2 /tmp/tmp.x3xwO5xXfw ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 28 -ge 60 ']' + echo -n . 
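
The long run of near-identical blocks above is the cluster-readiness poll: the test reads .status.state from the psmdb resource every 10 seconds and only moves on once it reports ready, passing through stopping, error, paused and initializing while the operator rolls the cluster after the spec change. A reduced sketch of the loop, with the 60-retry limit, the 10-second sleep and the messages taken from the trace; the real wait_cluster_consistency helper goes through kubectl_bin and may check more than just .status.state:

    wait_cluster_consistency() {
        local cluster_name=$1
        local wait_time=${2:-60}                 # matches the '-ge 60' checks above
        local retry=0 state
        echo -n 'waiting for cluster readyness'  # message spelled as in the trace
        while true; do
            state=$(kubectl get psmdb "$cluster_name" -o 'jsonpath={.status.state}')
            if [[ $state == "ready" ]]; then
                echo .OK
                return 0
            fi
            let retry+=1
            if [ "$retry" -ge "$wait_time" ]; then
                echo " cluster did not reach ready state, last state: $state"
                return 1
            fi
            echo -n .
            sleep 10                             # matches the 'sleep 10' between polls
        done
    }

On a recent enough kubectl the same wait could probably be expressed as kubectl wait --for=jsonpath='{.status.state}'=ready psmdb/some-name --timeout=600s, but the explicit loop keeps the dotted progress output seen in the log.
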
.+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.BcZQALM8AS +++ mktemp ++ local LAST_ERR=/tmp/tmp.Emd24DNiVu ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.BcZQALM8AS ++ cat /tmp/tmp.Emd24DNiVu ++ rm /tmp/tmp.BcZQALM8AS /tmp/tmp.Emd24DNiVu ++ return 0 + [[ ready == \r\e\a\d\y ]] + echo .OK .OK + compare_kubectl statefulset/some-name-rs0 -sharding-disabled + local resource=statefulset/some-name-rs0 + local postfix=-sharding-disabled + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1993/e2e-tests/expose-sharded/compare/statefulset_some-name-rs0-sharding-disabled.yml + local new_result=/tmp/tmp.wBdHIypxeu/statefulset_some-name-rs0.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1993/e2e-tests/expose-sharded/compare/statefulset_some-name-rs0-sharding-disabled-oc.yml ']' + kubectl_bin get -o yaml statefulset/some-name-rs0 + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("expose-sharded-21694", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. 
== "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.z7uQDklkU9 ++ mktemp + local LAST_ERR=/tmp/tmp.NkzbTcZpus + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get -o yaml statefulset/some-name-rs0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.z7uQDklkU9 + cat /tmp/tmp.NkzbTcZpus + rm /tmp/tmp.z7uQDklkU9 /tmp/tmp.NkzbTcZpus + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.wBdHIypxeu/statefulset_some-name-rs0.yml + version_gt 1.22 ++ echo '1.30 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.wBdHIypxeu/statefulset_some-name-rs0.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.wBdHIypxeu/statefulset_some-name-rs0.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1993/e2e-tests/expose-sharded/compare/statefulset_some-name-rs0-sharding-disabled.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1993/e2e-tests/expose-sharded/compare/statefulset_some-name-rs0-sharding-disabled.yml /tmp/tmp.wBdHIypxeu/statefulset_some-name-rs0.yml + log 'compare_kubectl: statefulset/some-name-rs0 OK' + set +o xtrace [2025-10-06T20:56:43+0000] compare_kubectl: statefulset/some-name-rs0 OK ++ kubectl_bin get sts -o yaml ++ yq '.items | length' +++ mktemp ++ local LAST_OUT=/tmp/tmp.G32rueQdte +++ mktemp ++ local LAST_ERR=/tmp/tmp.V5XbR9IFNw ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get sts -o yaml ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.G32rueQdte ++ cat /tmp/tmp.V5XbR9IFNw ++ rm /tmp/tmp.G32rueQdte /tmp/tmp.V5XbR9IFNw ++ return 0 + [[ 1 != 1 ]] + desc 'enabling sharding' + set +o xtrace ----------------------------------------------------------------------------------- enabling sharding ----------------------------------------------------------------------------------- + kubectl_bin patch psmdb some-name --type=json '-p=[{"op": "replace", "path": "/spec/sharding/enabled", "value": true}]' ++ mktemp + local LAST_OUT=/tmp/tmp.zSI9LqlgA6 ++ mktemp + local LAST_ERR=/tmp/tmp.n80PhWWWj1 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl patch psmdb some-name --type=json '-p=[{"op": "replace", "path": "/spec/sharding/enabled", "value": true}]' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.zSI9LqlgA6 perconaservermongodb.psmdb.percona.com/some-name patched + cat /tmp/tmp.n80PhWWWj1 + rm /tmp/tmp.zSI9LqlgA6 /tmp/tmp.n80PhWWWj1 + return 0 + sleep 10 + wait_cluster_consistency some-name 60 + local cluster_name=some-name + local wait_time=60 + retry=0 + sleep 7 + echo -n 'waiting for cluster readyness' waiting for cluster readyness++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.MvIolX5KTT +++ mktemp ++ local LAST_ERR=/tmp/tmp.9f13k5gJBD ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.MvIolX5KTT ++ cat /tmp/tmp.9f13k5gJBD ++ rm /tmp/tmp.MvIolX5KTT /tmp/tmp.9f13k5gJBD ++ return 0 + [[ stopping == \r\e\a\d\y ]] + let retry+=1 + '[' 1 -ge 60 ']' + echo -n . 
.+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.coV5X8lTZD +++ mktemp ++ local LAST_ERR=/tmp/tmp.8Ow15qQVKs ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.coV5X8lTZD ++ cat /tmp/tmp.8Ow15qQVKs ++ rm /tmp/tmp.coV5X8lTZD /tmp/tmp.8Ow15qQVKs ++ return 0 + [[ stopping == \r\e\a\d\y ]] + let retry+=1 + '[' 2 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.v1xjXe8hge +++ mktemp ++ local LAST_ERR=/tmp/tmp.4X5ks2h1Ow ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.v1xjXe8hge ++ cat /tmp/tmp.4X5ks2h1Ow ++ rm /tmp/tmp.v1xjXe8hge /tmp/tmp.4X5ks2h1Ow ++ return 0 + [[ stopping == \r\e\a\d\y ]] + let retry+=1 + '[' 3 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.CHSAp1Omg2 +++ mktemp ++ local LAST_ERR=/tmp/tmp.ZBscWmc6F9 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.CHSAp1Omg2 ++ cat /tmp/tmp.ZBscWmc6F9 ++ rm /tmp/tmp.CHSAp1Omg2 /tmp/tmp.ZBscWmc6F9 ++ return 0 + [[ stopping == \r\e\a\d\y ]] + let retry+=1 + '[' 4 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.LLVvkwQPyx +++ mktemp ++ local LAST_ERR=/tmp/tmp.eqC5iawhp4 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.LLVvkwQPyx ++ cat /tmp/tmp.eqC5iawhp4 ++ rm /tmp/tmp.LLVvkwQPyx /tmp/tmp.eqC5iawhp4 ++ return 0 + [[ stopping == \r\e\a\d\y ]] + let retry+=1 + '[' 5 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.QIFqLVbD1T +++ mktemp ++ local LAST_ERR=/tmp/tmp.F06c9Yvzen ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.QIFqLVbD1T ++ cat /tmp/tmp.F06c9Yvzen ++ rm /tmp/tmp.QIFqLVbD1T /tmp/tmp.F06c9Yvzen ++ return 0 + [[ stopping == \r\e\a\d\y ]] + let retry+=1 + '[' 6 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ufY6Glf3q6 +++ mktemp ++ local LAST_ERR=/tmp/tmp.TL2Jlon6dG ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.ufY6Glf3q6 ++ cat /tmp/tmp.TL2Jlon6dG ++ rm /tmp/tmp.ufY6Glf3q6 /tmp/tmp.TL2Jlon6dG ++ return 0 + [[ error == \r\e\a\d\y ]] + let retry+=1 + '[' 7 -ge 60 ']' + echo -n . 
.+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.twMZrL423e +++ mktemp ++ local LAST_ERR=/tmp/tmp.swHgLum1ZS ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.twMZrL423e ++ cat /tmp/tmp.swHgLum1ZS ++ rm /tmp/tmp.twMZrL423e /tmp/tmp.swHgLum1ZS ++ return 0 + [[ error == \r\e\a\d\y ]] + let retry+=1 + '[' 8 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.qllKoN8Aqp +++ mktemp ++ local LAST_ERR=/tmp/tmp.7TJJAicoXr ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.qllKoN8Aqp ++ cat /tmp/tmp.7TJJAicoXr ++ rm /tmp/tmp.qllKoN8Aqp /tmp/tmp.7TJJAicoXr ++ return 0 + [[ error == \r\e\a\d\y ]] + let retry+=1 + '[' 9 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.berDlWXcHa +++ mktemp ++ local LAST_ERR=/tmp/tmp.MLhr2mNEZq ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.berDlWXcHa ++ cat /tmp/tmp.MLhr2mNEZq ++ rm /tmp/tmp.berDlWXcHa /tmp/tmp.MLhr2mNEZq ++ return 0 + [[ error == \r\e\a\d\y ]] + let retry+=1 + '[' 10 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.WerMYy0Bvz +++ mktemp ++ local LAST_ERR=/tmp/tmp.OaPhFxP9D5 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.WerMYy0Bvz ++ cat /tmp/tmp.OaPhFxP9D5 ++ rm /tmp/tmp.WerMYy0Bvz /tmp/tmp.OaPhFxP9D5 ++ return 0 + [[ stopping == \r\e\a\d\y ]] + let retry+=1 + '[' 11 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.SwCt0hAkDE +++ mktemp ++ local LAST_ERR=/tmp/tmp.Nx1tUiu8L9 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.SwCt0hAkDE ++ cat /tmp/tmp.Nx1tUiu8L9 ++ rm /tmp/tmp.SwCt0hAkDE /tmp/tmp.Nx1tUiu8L9 ++ return 0 + [[ stopping == \r\e\a\d\y ]] + let retry+=1 + '[' 12 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Vrlb1SWCXI +++ mktemp ++ local LAST_ERR=/tmp/tmp.CUZKJEm7o6 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Vrlb1SWCXI ++ cat /tmp/tmp.CUZKJEm7o6 ++ rm /tmp/tmp.Vrlb1SWCXI /tmp/tmp.CUZKJEm7o6 ++ return 0 + [[ stopping == \r\e\a\d\y ]] + let retry+=1 + '[' 13 -ge 60 ']' + echo -n . 
.+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ajD4pouDjV +++ mktemp ++ local LAST_ERR=/tmp/tmp.4C1Jvbl7b6 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.ajD4pouDjV ++ cat /tmp/tmp.4C1Jvbl7b6 ++ rm /tmp/tmp.ajD4pouDjV /tmp/tmp.4C1Jvbl7b6 ++ return 0 + [[ stopping == \r\e\a\d\y ]] + let retry+=1 + '[' 14 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.vVO74ZbCMq +++ mktemp ++ local LAST_ERR=/tmp/tmp.W534nHNFvJ ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.vVO74ZbCMq ++ cat /tmp/tmp.W534nHNFvJ ++ rm /tmp/tmp.vVO74ZbCMq /tmp/tmp.W534nHNFvJ ++ return 0 + [[ stopping == \r\e\a\d\y ]] + let retry+=1 + '[' 15 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.jaIqa1moTg +++ mktemp ++ local LAST_ERR=/tmp/tmp.6OCI7UOMKT ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.jaIqa1moTg ++ cat /tmp/tmp.6OCI7UOMKT ++ rm /tmp/tmp.jaIqa1moTg /tmp/tmp.6OCI7UOMKT ++ return 0 + [[ stopping == \r\e\a\d\y ]] + let retry+=1 + '[' 16 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Rfy75FuNp9 +++ mktemp ++ local LAST_ERR=/tmp/tmp.35OLFQCK90 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Rfy75FuNp9 ++ cat /tmp/tmp.35OLFQCK90 ++ rm /tmp/tmp.Rfy75FuNp9 /tmp/tmp.35OLFQCK90 ++ return 0 + [[ error == \r\e\a\d\y ]] + let retry+=1 + '[' 17 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ZrG7oWxG3j +++ mktemp ++ local LAST_ERR=/tmp/tmp.P1KdHXjwIq ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.ZrG7oWxG3j ++ cat /tmp/tmp.P1KdHXjwIq ++ rm /tmp/tmp.ZrG7oWxG3j /tmp/tmp.P1KdHXjwIq ++ return 0 + [[ error == \r\e\a\d\y ]] + let retry+=1 + '[' 18 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.FWoz4Ua7qC +++ mktemp ++ local LAST_ERR=/tmp/tmp.PRcDswnUbI ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.FWoz4Ua7qC ++ cat /tmp/tmp.PRcDswnUbI ++ rm /tmp/tmp.FWoz4Ua7qC /tmp/tmp.PRcDswnUbI ++ return 0 + [[ error == \r\e\a\d\y ]] + let retry+=1 + '[' 19 -ge 60 ']' + echo -n . 
.+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.4d0k38rjK4 +++ mktemp ++ local LAST_ERR=/tmp/tmp.ExcPgEyg0j ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.4d0k38rjK4 ++ cat /tmp/tmp.ExcPgEyg0j ++ rm /tmp/tmp.4d0k38rjK4 /tmp/tmp.ExcPgEyg0j ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 20 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.GbGyNiFZFM +++ mktemp ++ local LAST_ERR=/tmp/tmp.thJnhZUHtH ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.GbGyNiFZFM ++ cat /tmp/tmp.thJnhZUHtH ++ rm /tmp/tmp.GbGyNiFZFM /tmp/tmp.thJnhZUHtH ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 21 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.2fNhGFhsEL +++ mktemp ++ local LAST_ERR=/tmp/tmp.JngWcRN42h ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.2fNhGFhsEL ++ cat /tmp/tmp.JngWcRN42h ++ rm /tmp/tmp.2fNhGFhsEL /tmp/tmp.JngWcRN42h ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 22 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.4UKDpBbNCx +++ mktemp ++ local LAST_ERR=/tmp/tmp.1fBEFqo4tK ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.4UKDpBbNCx ++ cat /tmp/tmp.1fBEFqo4tK ++ rm /tmp/tmp.4UKDpBbNCx /tmp/tmp.1fBEFqo4tK ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 23 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ZGt13wL0GN +++ mktemp ++ local LAST_ERR=/tmp/tmp.e0Y1Ly1CWW ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.ZGt13wL0GN ++ cat /tmp/tmp.e0Y1Ly1CWW ++ rm /tmp/tmp.ZGt13wL0GN /tmp/tmp.e0Y1Ly1CWW ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 24 -ge 60 ']' + echo -n . 
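
Once the state finally flips to ready (just below), compare_kubectl runs again, this time against the -sharding-enabled expected file and then against the newly created some-name-cfg and some-name-mongos StatefulSets. The large yq expression attached to each of these checks is a normalization pass: it deletes every field that legitimately differs between test runs (UIDs, resourceVersion, timestamps, image tags, cluster IPs, storage classes, the operator's hash annotations, the whole status) and rewrites the generated namespace to NAME_SPACE, so the final diff -u only fails on real spec drift. A reduced sketch of the idea, keeping only a handful of the deletions that appear in the full filter, with illustrative output paths (the test itself writes into a mktemp directory and diffs against the file under e2e-tests/expose-sharded/compare/):

    kubectl get -o yaml statefulset/some-name-rs0 \
        | yq eval '
            del(.metadata.uid) |
            del(.metadata.resourceVersion) |
            del(.metadata.managedFields) |
            del(.. | select(has("creationTimestamp")).creationTimestamp) |
            del(.. | select(has("image")).image) |
            del(.status) |
            (.. | select(tag == "!!str")) |= sub("expose-sharded-21694", "NAME_SPACE")
        ' - > /tmp/statefulset_some-name-rs0.normalized.yml
    diff -u statefulset_some-name-rs0-sharding-enabled.yml /tmp/statefulset_some-name-rs0.normalized.yml
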
.+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.kzj0K86Z6I +++ mktemp ++ local LAST_ERR=/tmp/tmp.Zc3kVb5DYL ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.kzj0K86Z6I ++ cat /tmp/tmp.Zc3kVb5DYL ++ rm /tmp/tmp.kzj0K86Z6I /tmp/tmp.Zc3kVb5DYL ++ return 0 + [[ ready == \r\e\a\d\y ]] + echo .OK .OK + compare_kubectl statefulset/some-name-rs0 -sharding-enabled + local resource=statefulset/some-name-rs0 + local postfix=-sharding-enabled + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1993/e2e-tests/expose-sharded/compare/statefulset_some-name-rs0-sharding-enabled.yml + local new_result=/tmp/tmp.wBdHIypxeu/statefulset_some-name-rs0.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1993/e2e-tests/expose-sharded/compare/statefulset_some-name-rs0-sharding-enabled-oc.yml ']' + kubectl_bin get -o yaml statefulset/some-name-rs0 + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("expose-sharded-21694", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. 
== "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.39kO77pjTH ++ mktemp + local LAST_ERR=/tmp/tmp.ydHC9QYhwV + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get -o yaml statefulset/some-name-rs0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.39kO77pjTH + cat /tmp/tmp.ydHC9QYhwV + rm /tmp/tmp.39kO77pjTH /tmp/tmp.ydHC9QYhwV + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.wBdHIypxeu/statefulset_some-name-rs0.yml + version_gt 1.22 ++ echo '1.30 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.wBdHIypxeu/statefulset_some-name-rs0.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.wBdHIypxeu/statefulset_some-name-rs0.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1993/e2e-tests/expose-sharded/compare/statefulset_some-name-rs0-sharding-enabled.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1993/e2e-tests/expose-sharded/compare/statefulset_some-name-rs0-sharding-enabled.yml /tmp/tmp.wBdHIypxeu/statefulset_some-name-rs0.yml + log 'compare_kubectl: statefulset/some-name-rs0 OK' + set +o xtrace [2025-10-06T21:01:35+0000] compare_kubectl: statefulset/some-name-rs0 OK + compare_kubectl statefulset/some-name-cfg + local resource=statefulset/some-name-cfg + local postfix= + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1993/e2e-tests/expose-sharded/compare/statefulset_some-name-cfg.yml + local new_result=/tmp/tmp.wBdHIypxeu/statefulset_some-name-cfg.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1993/e2e-tests/expose-sharded/compare/statefulset_some-name-cfg-oc.yml ']' + kubectl_bin get -o yaml statefulset/some-name-cfg + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. 
| select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("expose-sharded-21694", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. == "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.HPLT7NP5Kz ++ mktemp + local LAST_ERR=/tmp/tmp.rVDji3PeJX + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get -o yaml statefulset/some-name-cfg + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.HPLT7NP5Kz + cat /tmp/tmp.rVDji3PeJX + rm /tmp/tmp.HPLT7NP5Kz /tmp/tmp.rVDji3PeJX + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.wBdHIypxeu/statefulset_some-name-cfg.yml + version_gt 1.22 ++ echo '1.30 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.wBdHIypxeu/statefulset_some-name-cfg.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.wBdHIypxeu/statefulset_some-name-cfg.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1993/e2e-tests/expose-sharded/compare/statefulset_some-name-cfg.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1993/e2e-tests/expose-sharded/compare/statefulset_some-name-cfg.yml /tmp/tmp.wBdHIypxeu/statefulset_some-name-cfg.yml + log 'compare_kubectl: statefulset/some-name-cfg OK' + set +o xtrace [2025-10-06T21:01:36+0000] compare_kubectl: statefulset/some-name-cfg OK + compare_kubectl statefulset/some-name-mongos '' + local resource=statefulset/some-name-mongos + local postfix= + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1993/e2e-tests/expose-sharded/compare/statefulset_some-name-mongos.yml + local new_result=/tmp/tmp.wBdHIypxeu/statefulset_some-name-mongos.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1993/e2e-tests/expose-sharded/compare/statefulset_some-name-mongos-oc.yml ']' + kubectl_bin get -o yaml statefulset/some-name-mongos + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. 
| select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("expose-sharded-21694", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. == "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.VSeJKgjJpe ++ mktemp + local LAST_ERR=/tmp/tmp.n21alg8IBj + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get -o yaml statefulset/some-name-mongos + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.VSeJKgjJpe + cat /tmp/tmp.n21alg8IBj + rm /tmp/tmp.VSeJKgjJpe /tmp/tmp.n21alg8IBj + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.wBdHIypxeu/statefulset_some-name-mongos.yml + version_gt 1.22 ++ bc -l ++ echo '1.30 >= 1.22' + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.wBdHIypxeu/statefulset_some-name-mongos.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.wBdHIypxeu/statefulset_some-name-mongos.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1993/e2e-tests/expose-sharded/compare/statefulset_some-name-mongos.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1993/e2e-tests/expose-sharded/compare/statefulset_some-name-mongos.yml /tmp/tmp.wBdHIypxeu/statefulset_some-name-mongos.yml + log 'compare_kubectl: statefulset/some-name-mongos OK' + set +o xtrace [2025-10-06T21:01:37+0000] compare_kubectl: statefulset/some-name-mongos OK + desc 'write data, read from all' + set +o xtrace ----------------------------------------------------------------------------------- write data, read from all ----------------------------------------------------------------------------------- + run_mongos 'db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})' userAdmin:userAdmin123456@some-name-mongos.expose-sharded-21694 + local 'command=db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})' + local uri=userAdmin:userAdmin123456@some-name-mongos.expose-sharded-21694 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local port=27017 + local mongo_bin=mongo ++ awk -F: '{print $2}' ++ echo .svc.cluster.local + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.OEp8ujWAaV +++ mktemp ++ local LAST_ERR=/tmp/tmp.K9IHhDk6c5 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 
'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.OEp8ujWAaV ++ cat /tmp/tmp.K9IHhDk6c5 ++ rm /tmp/tmp.OEp8ujWAaV /tmp/tmp.K9IHhDk6c5 ++ return 0 + local client_container=psmdb-client-66f577db5f-6lfsh + kubectl_bin exec psmdb-client-66f577db5f-6lfsh -- bash -c 'printf '\''db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})\n'\'' | mongo mongodb://userAdmin:userAdmin123456@some-name-mongos.expose-sharded-21694.svc.cluster.local:27017/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.H2Fn4HnpSS ++ mktemp + local LAST_ERR=/tmp/tmp.E2yoIkjr3k + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-66f577db5f-6lfsh -- bash -c 'printf '\''db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})\n'\'' | mongo mongodb://userAdmin:userAdmin123456@some-name-mongos.expose-sharded-21694.svc.cluster.local:27017/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.H2Fn4HnpSS Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-mongos.expose-sharded-21694.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb Implicit session: session { "id" : UUID("2b28e0ab-0660-4c16-8e1a-fb734c0673c3") } Percona Server for MongoDB server version: v7.0.24-13 WARNING: shell and server versions do not match Successfully added user: { "user" : "myApp", "roles" : [ { "db" : "myApp", "role" : "readWrite" } ] } bye + cat /tmp/tmp.E2yoIkjr3k + rm /tmp/tmp.H2Fn4HnpSS /tmp/tmp.E2yoIkjr3k + return 0 + run_mongo 'db.createUser({user: "myApp", pwd: "myPass", roles: [{ db: "myApp", role: "readWrite" }]})' userAdmin:userAdmin123456@some-name-rs0-0.some-name-rs0.expose-sharded-21694 mongodb + local 'command=db.createUser({user: "myApp", pwd: "myPass", roles: [{ db: "myApp", role: "readWrite" }]})' + local uri=userAdmin:userAdmin123456@some-name-rs0-0.some-name-rs0.expose-sharded-21694 + local driver=mongodb + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.SRilaKjHlb +++ mktemp ++ local LAST_ERR=/tmp/tmp.PTcjXUXKM4 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.SRilaKjHlb ++ cat /tmp/tmp.PTcjXUXKM4 ++ rm /tmp/tmp.SRilaKjHlb /tmp/tmp.PTcjXUXKM4 ++ return 0 + local client_container=psmdb-client-66f577db5f-6lfsh + local mongo_flag= + [[ userAdmin:userAdmin123456@some-name-rs0-0.some-name-rs0.expose-sharded-21694 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-66f577db5f-6lfsh -- bash -c 'printf '\''db.createUser({user: "myApp", pwd: "myPass", roles: [{ db: "myApp", role: "readWrite" }]})\n'\'' | mongo mongodb://userAdmin:userAdmin123456@some-name-rs0-0.some-name-rs0.expose-sharded-21694.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.8PAYANJdEL ++ mktemp + local LAST_ERR=/tmp/tmp.HfrxgyCDoz + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-66f577db5f-6lfsh -- bash -c 'printf '\''db.createUser({user: "myApp", pwd: "myPass", roles: [{ db: "myApp", role: "readWrite" }]})\n'\'' | mongo 
mongodb://userAdmin:userAdmin123456@some-name-rs0-0.some-name-rs0.expose-sharded-21694.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.8PAYANJdEL Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-rs0-0.some-name-rs0.expose-sharded-21694.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("dea3bb36-60f0-4269-8c0d-76e94e9d5f83") } Percona Server for MongoDB server version: v7.0.24-13 WARNING: shell and server versions do not match Successfully added user: { "user" : "myApp", "roles" : [ { "db" : "myApp", "role" : "readWrite" } ] } bye + cat /tmp/tmp.HfrxgyCDoz + rm /tmp/tmp.8PAYANJdEL /tmp/tmp.HfrxgyCDoz + return 0 + run_mongos 'sh.enableSharding("myApp","rs0")' clusterAdmin:clusterAdmin123456@some-name-mongos.expose-sharded-21694 + local 'command=sh.enableSharding("myApp","rs0")' + local uri=clusterAdmin:clusterAdmin123456@some-name-mongos.expose-sharded-21694 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local port=27017 + local mongo_bin=mongo ++ echo .svc.cluster.local ++ awk -F: '{print $2}' + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.aqz7M1Lwdi +++ mktemp ++ local LAST_ERR=/tmp/tmp.q6Mc6lH1Te ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.aqz7M1Lwdi ++ cat /tmp/tmp.q6Mc6lH1Te ++ rm /tmp/tmp.aqz7M1Lwdi /tmp/tmp.q6Mc6lH1Te ++ return 0 + local client_container=psmdb-client-66f577db5f-6lfsh + kubectl_bin exec psmdb-client-66f577db5f-6lfsh -- bash -c 'printf '\''sh.enableSharding("myApp","rs0")\n'\'' | mongo mongodb://clusterAdmin:clusterAdmin123456@some-name-mongos.expose-sharded-21694.svc.cluster.local:27017/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.pWsR7dvCbd ++ mktemp + local LAST_ERR=/tmp/tmp.LZFYTWRpRO + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-66f577db5f-6lfsh -- bash -c 'printf '\''sh.enableSharding("myApp","rs0")\n'\'' | mongo mongodb://clusterAdmin:clusterAdmin123456@some-name-mongos.expose-sharded-21694.svc.cluster.local:27017/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.pWsR7dvCbd Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-mongos.expose-sharded-21694.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb Implicit session: session { "id" : UUID("448ad856-452f-4eda-bf3b-37ff0d138179") } Percona Server for MongoDB server version: v7.0.24-13 WARNING: shell and server versions do not match { "ok" : 1, "$clusterTime" : { "clusterTime" : Timestamp(1759784504, 8), "signature" : { "hash" : BinData(0,"89ZQSOY4yWfPamDlt5hvcIMH4g4="), "keyId" : NumberLong("7558213671462109204") } }, "operationTime" : Timestamp(1759784504, 2) } bye + cat /tmp/tmp.LZFYTWRpRO + rm /tmp/tmp.pWsR7dvCbd /tmp/tmp.LZFYTWRpRO + return 0 + run_mongos 'use myApp\n db.test.insert({ x: 100500 })' myApp:myPass@some-name-mongos.expose-sharded-21694 + local 'command=use myApp\n db.test.insert({ x: 100500 })' + local 
uri=myApp:myPass@some-name-mongos.expose-sharded-21694 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local port=27017 + local mongo_bin=mongo ++ echo .svc.cluster.local ++ awk -F: '{print $2}' + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.NLxWunKLpp +++ mktemp ++ local LAST_ERR=/tmp/tmp.dp2eAQ2HRk ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.NLxWunKLpp ++ cat /tmp/tmp.dp2eAQ2HRk ++ rm /tmp/tmp.NLxWunKLpp /tmp/tmp.dp2eAQ2HRk ++ return 0 + local client_container=psmdb-client-66f577db5f-6lfsh + kubectl_bin exec psmdb-client-66f577db5f-6lfsh -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.expose-sharded-21694.svc.cluster.local:27017/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.50zmGxdoon ++ mktemp + local LAST_ERR=/tmp/tmp.fVua7GVxqS + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-66f577db5f-6lfsh -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.expose-sharded-21694.svc.cluster.local:27017/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.50zmGxdoon Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-mongos.expose-sharded-21694.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb Implicit session: session { "id" : UUID("aca0e598-5016-42ad-9e0a-ac70e6bf0359") } Percona Server for MongoDB server version: v7.0.24-13 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.fVua7GVxqS + rm /tmp/tmp.50zmGxdoon /tmp/tmp.fVua7GVxqS + return 0 + compare_mongos_cmd find myApp:myPass@some-name-mongos.expose-sharded-21694 + local command=find + local uri=myApp:myPass@some-name-mongos.expose-sharded-21694 + local postfix= + local suffix= + local database=myApp + local collection=test + local port=27017 + local tls=false + [[ false == \t\r\u\e ]] + mongos_command=run_mongos + egrep -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' + run_mongos 'use myApp\n db.test.find()' myApp:myPass@some-name-mongos.expose-sharded-21694 mongodb '' '' 27017 + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-mongos.expose-sharded-21694 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local port=27017 + local mongo_bin=mongo + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ echo .svc.cluster.local ++ awk -F: '{print $2}' egrep: warning: egrep is obsolescent; using grep -E + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.XhDdCRlaHn +++ mktemp ++ local LAST_ERR=/tmp/tmp.HZQNjZxapp ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods 
--selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.XhDdCRlaHn ++ cat /tmp/tmp.HZQNjZxapp ++ rm /tmp/tmp.XhDdCRlaHn /tmp/tmp.HZQNjZxapp ++ return 0 + local client_container=psmdb-client-66f577db5f-6lfsh + kubectl_bin exec psmdb-client-66f577db5f-6lfsh -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.expose-sharded-21694.svc.cluster.local:27017/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.A1mvpbmwLv ++ mktemp + local LAST_ERR=/tmp/tmp.McGQmn8HfT + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-66f577db5f-6lfsh -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.expose-sharded-21694.svc.cluster.local:27017/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.A1mvpbmwLv + cat /tmp/tmp.McGQmn8HfT + rm /tmp/tmp.A1mvpbmwLv /tmp/tmp.McGQmn8HfT + return 0 + [[ 0 -eq 0 ]] + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1993/e2e-tests/expose-sharded/compare/find.json /tmp/tmp.wBdHIypxeu/find + desc 'Unexposed -> Exposed, ClusterIP' + set +o xtrace ----------------------------------------------------------------------------------- Unexposed -> Exposed, ClusterIP ----------------------------------------------------------------------------------- + expose_cluster ClusterIP + expose_type=ClusterIP + expose_status=true + kubectl_bin patch psmdb some-name --type=json --patch '[ { "op": "replace", "path": "/spec/replsets/0/expose", "value": { "enabled": true, "type" : "ClusterIP" } }, { "op": "replace", "path": "/spec/sharding/mongos/expose", "value": { "type" : "ClusterIP" } }, { "op": "replace", "path": "/spec/sharding/configsvrReplSet/expose", "value": { "enabled": true, "type" : "ClusterIP" } }]' ++ mktemp + local LAST_OUT=/tmp/tmp.lWBtepQbMG ++ mktemp + local LAST_ERR=/tmp/tmp.Edn0CjGpo9 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl patch psmdb some-name --type=json --patch '[ { "op": "replace", "path": "/spec/replsets/0/expose", "value": { "enabled": true, "type" : "ClusterIP" } }, { "op": "replace", "path": "/spec/sharding/mongos/expose", "value": { "type" : "ClusterIP" } }, { "op": "replace", "path": "/spec/sharding/configsvrReplSet/expose", "value": { "enabled": true, "type" : "ClusterIP" } }]' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.lWBtepQbMG perconaservermongodb.psmdb.percona.com/some-name patched + cat /tmp/tmp.Edn0CjGpo9 + rm /tmp/tmp.lWBtepQbMG /tmp/tmp.Edn0CjGpo9 + return 0 + wait_for_running some-name-rs0 3 + local name=some-name-rs0 + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=some-name ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod some-name-rs0-0 + local pod=some-name-rs0-0 + set +o xtrace waiting for pod/some-name-rs0-0 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod some-name-rs0-1 + local pod=some-name-rs0-1 + set +o xtrace waiting for pod/some-name-rs0-1 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.DhI2KISd3X +++ mktemp ++ local LAST_ERR=/tmp/tmp.GYVhe1r6Fr ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in 
$(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.DhI2KISd3X ++ cat /tmp/tmp.GYVhe1r6Fr ++ rm /tmp/tmp.DhI2KISd3X /tmp/tmp.GYVhe1r6Fr ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-rs0-2 + local pod=some-name-rs0-2 + set +o xtrace waiting for pod/some-name-rs0-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.PH7Nnet6L0 +++ mktemp ++ local LAST_ERR=/tmp/tmp.mPGG2yFpf5 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.PH7Nnet6L0 ++ cat /tmp/tmp.mPGG2yFpf5 ++ rm /tmp/tmp.PH7Nnet6L0 /tmp/tmp.mPGG2yFpf5 ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.EQSSENrbOK +++ mktemp ++ local LAST_ERR=/tmp/tmp.yPMngO4BN4 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.EQSSENrbOK ++ cat /tmp/tmp.yPMngO4BN4 ++ rm /tmp/tmp.EQSSENrbOK /tmp/tmp.yPMngO4BN4 ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness + wait_for_running some-name-cfg 3 false + local name=some-name-cfg + let last_pod=2 + local check_cluster_readyness=false + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=cfg + local cluster_name=some-name ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod some-name-cfg-0 + local pod=some-name-cfg-0 + set +o xtrace waiting for pod/some-name-cfg-0 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod some-name-cfg-1 + local pod=some-name-cfg-1 + set +o xtrace waiting for pod/some-name-cfg-1 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.QQgqaJhVxs +++ mktemp ++ local LAST_ERR=/tmp/tmp.Tc57Rtpz8d ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.QQgqaJhVxs ++ cat /tmp/tmp.Tc57Rtpz8d ++ rm /tmp/tmp.QQgqaJhVxs /tmp/tmp.Tc57Rtpz8d ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-cfg-2 + local pod=some-name-cfg-2 + set +o xtrace waiting for pod/some-name-cfg-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.CVFRn9zaME +++ mktemp ++ local LAST_ERR=/tmp/tmp.jncx9xvlD4 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.CVFRn9zaME ++ cat /tmp/tmp.jncx9xvlD4 ++ rm /tmp/tmp.CVFRn9zaME /tmp/tmp.jncx9xvlD4 ++ return 0 + [[ '' == \t\r\u\e ]] 
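The patch traced a little above is the core of expose_cluster: one JSON patch that flips the replica set, mongos and config server replica set to ClusterIP services, after which wait_for_running waits for each pod and reads the arbiter/nonvoting/hidden flags from the CR spec (the empty '[[ '' == true ]]' checks here) to decide whether extra pods also need to be awaited. A minimal standalone sketch of that step, assuming the CR name and namespace from this run (some-name, expose-sharded-21694):

# Sketch only: switch all exposed components to ClusterIP, then wait for the rs0 pods.
# CR name, namespace and JSON paths are copied from the trace; adjust for other clusters.
kubectl -n expose-sharded-21694 patch psmdb some-name --type=json --patch '[
  { "op": "replace", "path": "/spec/replsets/0/expose", "value": { "enabled": true, "type": "ClusterIP" } },
  { "op": "replace", "path": "/spec/sharding/mongos/expose", "value": { "type": "ClusterIP" } },
  { "op": "replace", "path": "/spec/sharding/configsvrReplSet/expose", "value": { "enabled": true, "type": "ClusterIP" } }
]'
# Roughly what wait_pod does for each member, expressed with kubectl wait instead of manual polling:
for pod in some-name-rs0-0 some-name-rs0-1 some-name-rs0-2; do
  kubectl -n expose-sharded-21694 wait --for=condition=Ready "pod/$pod" --timeout=300s
done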
++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.59dQPdRrOp +++ mktemp ++ local LAST_ERR=/tmp/tmp.oL70xXeWHW ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.59dQPdRrOp ++ cat /tmp/tmp.oL70xXeWHW ++ rm /tmp/tmp.59dQPdRrOp /tmp/tmp.oL70xXeWHW ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ false == \t\r\u\e ]] + wait_for_running some-name-mongos 3 + local name=some-name-mongos + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=mongos + local cluster_name=some-name ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod some-name-mongos-0 + local pod=some-name-mongos-0 + set +o xtrace waiting for pod/some-name-mongos-0 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod some-name-mongos-1 + local pod=some-name-mongos-1 + set +o xtrace waiting for pod/some-name-mongos-1 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.4HHg214Q5d +++ mktemp ++ local LAST_ERR=/tmp/tmp.S5sjvZBSNz ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.4HHg214Q5d ++ cat /tmp/tmp.S5sjvZBSNz ++ rm /tmp/tmp.4HHg214Q5d /tmp/tmp.S5sjvZBSNz ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-mongos-2 + local pod=some-name-mongos-2 + set +o xtrace waiting for pod/some-name-mongos-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.64poUerAK6 +++ mktemp ++ local LAST_ERR=/tmp/tmp.53qI3JGDjW ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.64poUerAK6 ++ cat /tmp/tmp.53qI3JGDjW ++ rm /tmp/tmp.64poUerAK6 /tmp/tmp.53qI3JGDjW ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.dyDTzJlemg +++ mktemp ++ local LAST_ERR=/tmp/tmp.opopCX4A3P ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.dyDTzJlemg ++ cat /tmp/tmp.opopCX4A3P ++ rm /tmp/tmp.dyDTzJlemg /tmp/tmp.opopCX4A3P ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness + wait_cluster_consistency some-name + local cluster_name=some-name + local wait_time=32 + retry=0 + sleep 7 + echo -n 'waiting for cluster readyness' waiting for cluster readyness++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Ra7rxRpfzN +++ mktemp ++ local LAST_ERR=/tmp/tmp.h791eXbGh6 ++ local 
exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Ra7rxRpfzN ++ cat /tmp/tmp.h791eXbGh6 ++ rm /tmp/tmp.Ra7rxRpfzN /tmp/tmp.h791eXbGh6 ++ return 0 + [[ ready == \r\e\a\d\y ]] + echo .OK .OK + run_mongos 'use myApp\n db.test.insert({ x: 100501 })' myApp:myPass@some-name-mongos.expose-sharded-21694 + local 'command=use myApp\n db.test.insert({ x: 100501 })' + local uri=myApp:myPass@some-name-mongos.expose-sharded-21694 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local port=27017 + local mongo_bin=mongo ++ echo .svc.cluster.local ++ awk -F: '{print $2}' + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.5d7U4yw4Bt +++ mktemp ++ local LAST_ERR=/tmp/tmp.bPQcAWxyRb ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.5d7U4yw4Bt ++ cat /tmp/tmp.bPQcAWxyRb ++ rm /tmp/tmp.5d7U4yw4Bt /tmp/tmp.bPQcAWxyRb ++ return 0 + local client_container=psmdb-client-66f577db5f-6lfsh + kubectl_bin exec psmdb-client-66f577db5f-6lfsh -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100501 })\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.expose-sharded-21694.svc.cluster.local:27017/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.e62rzy68Xt ++ mktemp + local LAST_ERR=/tmp/tmp.A5cnV7ygDi + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-66f577db5f-6lfsh -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100501 })\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.expose-sharded-21694.svc.cluster.local:27017/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.e62rzy68Xt Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-mongos.expose-sharded-21694.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb Implicit session: session { "id" : UUID("506c63c8-0b8e-447a-b1c3-8696640ed610") } Percona Server for MongoDB server version: v7.0.24-13 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.A5cnV7ygDi + rm /tmp/tmp.e62rzy68Xt /tmp/tmp.A5cnV7ygDi + return 0 + compare_mongos_cmd find myApp:myPass@some-name-mongos.expose-sharded-21694 -2nd + local command=find + local uri=myApp:myPass@some-name-mongos.expose-sharded-21694 + local postfix=-2nd + local suffix= + local database=myApp + local collection=test + local port=27017 + local tls=false + [[ false == \t\r\u\e ]] + mongos_command=run_mongos + run_mongos 'use myApp\n db.test.find()' myApp:myPass@some-name-mongos.expose-sharded-21694 mongodb '' '' 27017 + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-mongos.expose-sharded-21694 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local port=27017 + local mongo_bin=mongo + egrep -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer 
service' ++ echo .svc.cluster.local ++ awk -F: '{print $2}' + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' egrep: warning: egrep is obsolescent; using grep -E + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.HjsvfTuwVc +++ mktemp ++ local LAST_ERR=/tmp/tmp.u3zgyIp4VM ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.HjsvfTuwVc ++ cat /tmp/tmp.u3zgyIp4VM ++ rm /tmp/tmp.HjsvfTuwVc /tmp/tmp.u3zgyIp4VM ++ return 0 + local client_container=psmdb-client-66f577db5f-6lfsh + kubectl_bin exec psmdb-client-66f577db5f-6lfsh -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.expose-sharded-21694.svc.cluster.local:27017/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.jJWO5ZBfIj ++ mktemp + local LAST_ERR=/tmp/tmp.5oHZ6iUN62 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-66f577db5f-6lfsh -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.expose-sharded-21694.svc.cluster.local:27017/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.jJWO5ZBfIj + cat /tmp/tmp.5oHZ6iUN62 + rm /tmp/tmp.jJWO5ZBfIj /tmp/tmp.5oHZ6iUN62 + return 0 + [[ 0 -eq 0 ]] + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1993/e2e-tests/expose-sharded/compare/find-2nd.json /tmp/tmp.wBdHIypxeu/find-2nd + compare_mongo_cmd find myApp:myPass@some-name-rs0-0.some-name-rs0.expose-sharded-21694 -2nd + local command=find + local uri=myApp:myPass@some-name-rs0-0.some-name-rs0.expose-sharded-21694 + local postfix=-2nd + local suffix= + local database=myApp + local collection=test + local sort= + local tls=false + local 'full_command=db.test.find()' + [[ -n '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-10-06T21:02:56+0000] running db.test.find() in myApp + [[ false == \t\r\u\e ]] + mongo_command=run_mongo + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-0.some-name-rs0.expose-sharded-21694 mongodb '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-0.some-name-rs0.expose-sharded-21694 + local driver=mongodb + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' + local suffix=.svc.cluster.local + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp egrep: warning: egrep is obsolescent; using grep -E ++ local LAST_OUT=/tmp/tmp.thjyvARrBr +++ mktemp ++ local LAST_ERR=/tmp/tmp.8EThGETKO1 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.thjyvARrBr ++ cat /tmp/tmp.8EThGETKO1 ++ rm /tmp/tmp.thjyvARrBr /tmp/tmp.8EThGETKO1 ++ return 0 + local client_container=psmdb-client-66f577db5f-6lfsh + local 
mongo_flag= + [[ myApp:myPass@some-name-rs0-0.some-name-rs0.expose-sharded-21694 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-66f577db5f-6lfsh -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-0.some-name-rs0.expose-sharded-21694.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.VcLdZb8uXh ++ mktemp + local LAST_ERR=/tmp/tmp.FYZE0VsDwh + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-66f577db5f-6lfsh -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-0.some-name-rs0.expose-sharded-21694.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.VcLdZb8uXh + cat /tmp/tmp.FYZE0VsDwh + rm /tmp/tmp.VcLdZb8uXh /tmp/tmp.FYZE0VsDwh + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1993/e2e-tests/expose-sharded/compare/find-2nd.json /tmp/tmp.wBdHIypxeu/find-2nd + compare_mongo_cmd find myApp:myPass@some-name-rs0-1.some-name-rs0.expose-sharded-21694 -2nd + local command=find + local uri=myApp:myPass@some-name-rs0-1.some-name-rs0.expose-sharded-21694 + local postfix=-2nd + local suffix= + local database=myApp + local collection=test + local sort= + local tls=false + local 'full_command=db.test.find()' + [[ -n '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-10-06T21:02:59+0000] running db.test.find() in myApp + [[ false == \t\r\u\e ]] + mongo_command=run_mongo + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-1.some-name-rs0.expose-sharded-21694 mongodb '' + local 'command=use myApp\n db.test.find()' + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' + local uri=myApp:myPass@some-name-rs0-1.some-name-rs0.expose-sharded-21694 + local driver=mongodb + local suffix=.svc.cluster.local + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp egrep: warning: egrep is obsolescent; using grep -E ++ local LAST_OUT=/tmp/tmp.V9GkchYG69 +++ mktemp ++ local LAST_ERR=/tmp/tmp.CKDozgKhX8 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.V9GkchYG69 ++ cat /tmp/tmp.CKDozgKhX8 ++ rm /tmp/tmp.V9GkchYG69 /tmp/tmp.CKDozgKhX8 ++ return 0 + local client_container=psmdb-client-66f577db5f-6lfsh + local mongo_flag= + [[ myApp:myPass@some-name-rs0-1.some-name-rs0.expose-sharded-21694 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-66f577db5f-6lfsh -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-1.some-name-rs0.expose-sharded-21694.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.wdpMwp4y68 ++ mktemp + local LAST_ERR=/tmp/tmp.1GMxFyU2At + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-66f577db5f-6lfsh -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo 
mongodb://myApp:myPass@some-name-rs0-1.some-name-rs0.expose-sharded-21694.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.wdpMwp4y68 + cat /tmp/tmp.1GMxFyU2At + rm /tmp/tmp.wdpMwp4y68 /tmp/tmp.1GMxFyU2At + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1993/e2e-tests/expose-sharded/compare/find-2nd.json /tmp/tmp.wBdHIypxeu/find-2nd + compare_mongo_cmd find myApp:myPass@some-name-rs0-2.some-name-rs0.expose-sharded-21694 -2nd + local command=find + local uri=myApp:myPass@some-name-rs0-2.some-name-rs0.expose-sharded-21694 + local postfix=-2nd + local suffix= + local database=myApp + local collection=test + local sort= + local tls=false + local 'full_command=db.test.find()' + [[ -n '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-10-06T21:03:01+0000] running db.test.find() in myApp + [[ false == \t\r\u\e ]] + mongo_command=run_mongo + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-2.some-name-rs0.expose-sharded-21694 mongodb '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-2.some-name-rs0.expose-sharded-21694 + local driver=mongodb + local suffix=.svc.cluster.local + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' +++ mktemp egrep: warning: egrep is obsolescent; using grep -E ++ local LAST_OUT=/tmp/tmp.RAEg8OFYeE +++ mktemp ++ local LAST_ERR=/tmp/tmp.VjcSN32Bex ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.RAEg8OFYeE ++ cat /tmp/tmp.VjcSN32Bex ++ rm /tmp/tmp.RAEg8OFYeE /tmp/tmp.VjcSN32Bex ++ return 0 + local client_container=psmdb-client-66f577db5f-6lfsh + local mongo_flag= + [[ myApp:myPass@some-name-rs0-2.some-name-rs0.expose-sharded-21694 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-66f577db5f-6lfsh -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-2.some-name-rs0.expose-sharded-21694.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.nGGOgJJV3Z ++ mktemp + local LAST_ERR=/tmp/tmp.R74kVVxOYA + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-66f577db5f-6lfsh -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-2.some-name-rs0.expose-sharded-21694.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.nGGOgJJV3Z + cat /tmp/tmp.R74kVVxOYA + rm /tmp/tmp.nGGOgJJV3Z /tmp/tmp.R74kVVxOYA + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1993/e2e-tests/expose-sharded/compare/find-2nd.json /tmp/tmp.wBdHIypxeu/find-2nd + compare_mongo_config some-name expose-sharded-21694 + cluster=some-name + namespace=expose-sharded-21694 + enable_expose=true + desc 'Compare mongo config' + set +o xtrace 
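Each per-member check above follows the same pipeline: exec into the psmdb-client pod, run db.test.find() against one rs0 member with ssl=false&replicaSet=rs0, filter out shell banners and log noise, blank out ObjectId values and the random namespace suffix, then diff the result against a golden file. A condensed sketch of one iteration, with the client pod, namespace and golden file taken from this run as placeholders (grep -E is used in place of the obsolescent egrep the script calls):

# Sketch of one compare_mongo_cmd pass against the rs0-0 member.
ns=expose-sharded-21694
client=psmdb-client-66f577db5f-6lfsh
golden=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1993/e2e-tests/expose-sharded/compare/find-2nd.json
uri="mongodb://myApp:myPass@some-name-rs0-0.some-name-rs0.${ns}.svc.cluster.local/admin?ssl=false&replicaSet=rs0"
kubectl -n "$ns" exec "$client" -- bash -c "printf 'use myApp\n db.test.find()\n' | mongo '$uri'" \
  | grep -Ev 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Implicit session:|versions do not match' \
  | sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+\.svc/-xxx.svc/' \
  > /tmp/find-2nd
diff -u "$golden" /tmp/find-2nd   # any difference makes the test fail

The sed normalization is what keeps the diff stable across runs: ObjectId values change on every insert, and the namespace carries a random numeric suffix, so both are rewritten before comparison.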
----------------------------------------------------------------------------------- Compare mongo config ----------------------------------------------------------------------------------- + cfg_0_endpoint=some-name-cfg-0.some-name-cfg.expose-sharded-21694.svc.cluster.local ++ run_mongo 'var host;var x=0;rs.conf().members.forEach(function(d){ if(d.tags.podName=="some-name-cfg-0"){ host=rs.conf().members[x].host;print(host)};x=x+1; })' clusterAdmin:clusterAdmin123456@some-name-cfg.expose-sharded-21694 ++ egrep -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|bye' ++ local 'command=var host;var x=0;rs.conf().members.forEach(function(d){ if(d.tags.podName=="some-name-cfg-0"){ host=rs.conf().members[x].host;print(host)};x=x+1; })' ++ local uri=clusterAdmin:clusterAdmin123456@some-name-cfg.expose-sharded-21694 ++ local driver=mongodb+srv ++ local suffix=.svc.cluster.local +++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++++ mktemp egrep: warning: egrep is obsolescent; using grep -E +++ local LAST_OUT=/tmp/tmp.KMdVjJdB2U ++++ mktemp +++ local LAST_ERR=/tmp/tmp.rbKUUgxNqp +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.KMdVjJdB2U +++ cat /tmp/tmp.rbKUUgxNqp +++ rm /tmp/tmp.KMdVjJdB2U /tmp/tmp.rbKUUgxNqp +++ return 0 ++ local client_container=psmdb-client-66f577db5f-6lfsh ++ local mongo_flag= ++ [[ clusterAdmin:clusterAdmin123456@some-name-cfg.expose-sharded-21694 == *cfg* ]] ++ replica_set=cfg ++ kubectl_bin exec psmdb-client-66f577db5f-6lfsh -- bash -c 'printf '\''var host;var x=0;rs.conf().members.forEach(function(d){ if(d.tags.podName=="some-name-cfg-0"){ host=rs.conf().members[x].host;print(host)};x=x+1; })\n'\'' | mongo mongodb+srv://clusterAdmin:clusterAdmin123456@some-name-cfg.expose-sharded-21694.svc.cluster.local/admin?ssl=false\&replicaSet=cfg ' +++ mktemp ++ local LAST_OUT=/tmp/tmp.fBLP034lwN +++ mktemp ++ local LAST_ERR=/tmp/tmp.hIfxlWSCAW ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl exec psmdb-client-66f577db5f-6lfsh -- bash -c 'printf '\''var host;var x=0;rs.conf().members.forEach(function(d){ if(d.tags.podName=="some-name-cfg-0"){ host=rs.conf().members[x].host;print(host)};x=x+1; })\n'\'' | mongo mongodb+srv://clusterAdmin:clusterAdmin123456@some-name-cfg.expose-sharded-21694.svc.cluster.local/admin?ssl=false\&replicaSet=cfg ' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.fBLP034lwN ++ cat /tmp/tmp.hIfxlWSCAW ++ rm /tmp/tmp.fBLP034lwN /tmp/tmp.hIfxlWSCAW ++ return 0 + cfg_0_endpoint_actual=some-name-cfg-0.some-name-cfg.expose-sharded-21694.svc.cluster.local:27017 + rs0_0_endpoint=some-name-rs0-0.some-name-rs0.expose-sharded-21694.svc.cluster.local ++ run_mongo 'var host;var x=0;rs.conf().members.forEach(function(d){ if(d.tags.podName=="some-name-rs0-0"){ host=rs.conf().members[x].host;print(host)};x=x+1; })' clusterAdmin:clusterAdmin123456@some-name-rs0.expose-sharded-21694 ++ local 'command=var host;var x=0;rs.conf().members.forEach(function(d){ if(d.tags.podName=="some-name-rs0-0"){ host=rs.conf().members[x].host;print(host)};x=x+1; })' ++ local 
uri=clusterAdmin:clusterAdmin123456@some-name-rs0.expose-sharded-21694 ++ local driver=mongodb+srv ++ local suffix=.svc.cluster.local ++ egrep -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|bye' +++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++++ mktemp egrep: warning: egrep is obsolescent; using grep -E +++ local LAST_OUT=/tmp/tmp.z9q2oMy8xK ++++ mktemp +++ local LAST_ERR=/tmp/tmp.UizMPPIkF1 +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.z9q2oMy8xK +++ cat /tmp/tmp.UizMPPIkF1 +++ rm /tmp/tmp.z9q2oMy8xK /tmp/tmp.UizMPPIkF1 +++ return 0 ++ local client_container=psmdb-client-66f577db5f-6lfsh ++ local mongo_flag= ++ [[ clusterAdmin:clusterAdmin123456@some-name-rs0.expose-sharded-21694 == *cfg* ]] ++ replica_set=rs0 ++ kubectl_bin exec psmdb-client-66f577db5f-6lfsh -- bash -c 'printf '\''var host;var x=0;rs.conf().members.forEach(function(d){ if(d.tags.podName=="some-name-rs0-0"){ host=rs.conf().members[x].host;print(host)};x=x+1; })\n'\'' | mongo mongodb+srv://clusterAdmin:clusterAdmin123456@some-name-rs0.expose-sharded-21694.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' +++ mktemp ++ local LAST_OUT=/tmp/tmp.4gwvUCBGEM +++ mktemp ++ local LAST_ERR=/tmp/tmp.H5kTxVvvTl ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl exec psmdb-client-66f577db5f-6lfsh -- bash -c 'printf '\''var host;var x=0;rs.conf().members.forEach(function(d){ if(d.tags.podName=="some-name-rs0-0"){ host=rs.conf().members[x].host;print(host)};x=x+1; })\n'\'' | mongo mongodb+srv://clusterAdmin:clusterAdmin123456@some-name-rs0.expose-sharded-21694.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.4gwvUCBGEM ++ cat /tmp/tmp.H5kTxVvvTl ++ rm /tmp/tmp.4gwvUCBGEM /tmp/tmp.H5kTxVvvTl ++ return 0 + rs0_0_endpoint_actual=some-name-rs0-0.some-name-rs0.expose-sharded-21694.svc.cluster.local:27017 + [[ some-name-rs0-0.some-name-rs0.expose-sharded-21694.svc.cluster.local:27017 != \s\o\m\e\-\n\a\m\e\-\r\s\0\-\0\.\s\o\m\e\-\n\a\m\e\-\r\s\0\.\e\x\p\o\s\e\-\s\h\a\r\d\e\d\-\2\1\6\9\4\.\s\v\c\.\c\l\u\s\t\e\r\.\l\o\c\a\l\:\2\7\0\1\7 ]] + [[ some-name-cfg-0.some-name-cfg.expose-sharded-21694.svc.cluster.local:27017 != \s\o\m\e\-\n\a\m\e\-\c\f\g\-\0\.\s\o\m\e\-\n\a\m\e\-\c\f\g\.\e\x\p\o\s\e\-\s\h\a\r\d\e\d\-\2\1\6\9\4\.\s\v\c\.\c\l\u\s\t\e\r\.\l\o\c\a\l\:\2\7\0\1\7 ]] + desc 'Exposed, ClusterIP -> LoadBalancer' + set +o xtrace ----------------------------------------------------------------------------------- Exposed, ClusterIP -> LoadBalancer ----------------------------------------------------------------------------------- + expose_cluster LoadBalancer + expose_type=LoadBalancer + expose_status=true + kubectl_bin patch psmdb some-name --type=json --patch '[ { "op": "replace", "path": "/spec/replsets/0/expose", "value": { "enabled": true, "type" : "LoadBalancer" } }, { "op": "replace", "path": "/spec/sharding/mongos/expose", "value": { "type" : "LoadBalancer" } }, { "op": "replace", "path": "/spec/sharding/configsvrReplSet/expose", "value": { "enabled": true, "type" : "LoadBalancer" } }]' ++ mktemp + local 
LAST_OUT=/tmp/tmp.E1CfvVwfNT ++ mktemp + local LAST_ERR=/tmp/tmp.B0hM2a6cf2 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl patch psmdb some-name --type=json --patch '[ { "op": "replace", "path": "/spec/replsets/0/expose", "value": { "enabled": true, "type" : "LoadBalancer" } }, { "op": "replace", "path": "/spec/sharding/mongos/expose", "value": { "type" : "LoadBalancer" } }, { "op": "replace", "path": "/spec/sharding/configsvrReplSet/expose", "value": { "enabled": true, "type" : "LoadBalancer" } }]' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.E1CfvVwfNT perconaservermongodb.psmdb.percona.com/some-name patched + cat /tmp/tmp.B0hM2a6cf2 + rm /tmp/tmp.E1CfvVwfNT /tmp/tmp.B0hM2a6cf2 + return 0 + wait_for_running some-name-rs0 3 + local name=some-name-rs0 + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=some-name ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod some-name-rs0-0 + local pod=some-name-rs0-0 + set +o xtrace waiting for pod/some-name-rs0-0 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod some-name-rs0-1 + local pod=some-name-rs0-1 + set +o xtrace waiting for pod/some-name-rs0-1 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.grtay3nHMA +++ mktemp ++ local LAST_ERR=/tmp/tmp.Q1nXqXSTeI ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.grtay3nHMA ++ cat /tmp/tmp.Q1nXqXSTeI ++ rm /tmp/tmp.grtay3nHMA /tmp/tmp.Q1nXqXSTeI ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-rs0-2 + local pod=some-name-rs0-2 + set +o xtrace waiting for pod/some-name-rs0-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.wPrg0dJYf9 +++ mktemp ++ local LAST_ERR=/tmp/tmp.wMJ7nOIiQ3 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.wPrg0dJYf9 ++ cat /tmp/tmp.wMJ7nOIiQ3 ++ rm /tmp/tmp.wPrg0dJYf9 /tmp/tmp.wMJ7nOIiQ3 ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.hZcZYyFZZY +++ mktemp ++ local LAST_ERR=/tmp/tmp.CDAFGOehY7 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.hZcZYyFZZY ++ cat /tmp/tmp.CDAFGOehY7 ++ rm /tmp/tmp.hZcZYyFZZY /tmp/tmp.CDAFGOehY7 ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness + wait_for_running some-name-cfg 3 false + local name=some-name-cfg + let last_pod=2 + local check_cluster_readyness=false + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=cfg + local cluster_name=some-name ++ seq 0 2 + for i in $(seq 0 
$last_pod) + [[ 0 -eq 2 ]] + wait_pod some-name-cfg-0 + local pod=some-name-cfg-0 + set +o xtrace waiting for pod/some-name-cfg-0 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod some-name-cfg-1 + local pod=some-name-cfg-1 + set +o xtrace waiting for pod/some-name-cfg-1 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.zNXazexhcS +++ mktemp ++ local LAST_ERR=/tmp/tmp.wzHXdHizUs ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.zNXazexhcS ++ cat /tmp/tmp.wzHXdHizUs ++ rm /tmp/tmp.zNXazexhcS /tmp/tmp.wzHXdHizUs ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-cfg-2 + local pod=some-name-cfg-2 + set +o xtrace waiting for pod/some-name-cfg-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.CB9veo4h23 +++ mktemp ++ local LAST_ERR=/tmp/tmp.xVvTgUkg35 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.CB9veo4h23 ++ cat /tmp/tmp.xVvTgUkg35 ++ rm /tmp/tmp.CB9veo4h23 /tmp/tmp.xVvTgUkg35 ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.toiPPGKl2c +++ mktemp ++ local LAST_ERR=/tmp/tmp.yeXfkGsNCF ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.toiPPGKl2c ++ cat /tmp/tmp.yeXfkGsNCF ++ rm /tmp/tmp.toiPPGKl2c /tmp/tmp.yeXfkGsNCF ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ false == \t\r\u\e ]] + wait_for_running some-name-mongos 3 + local name=some-name-mongos + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=mongos + local cluster_name=some-name ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod some-name-mongos-0 + local pod=some-name-mongos-0 + set +o xtrace waiting for pod/some-name-mongos-0 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod some-name-mongos-1 + local pod=some-name-mongos-1 + set +o xtrace waiting for pod/some-name-mongos-1 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.fXgYLMDajT +++ mktemp ++ local LAST_ERR=/tmp/tmp.wk6phsyKBz ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.fXgYLMDajT ++ cat /tmp/tmp.wk6phsyKBz ++ rm /tmp/tmp.fXgYLMDajT /tmp/tmp.wk6phsyKBz ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-mongos-2 + local pod=some-name-mongos-2 + set +o xtrace waiting for 
pod/some-name-mongos-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.XqhVGtclUu +++ mktemp ++ local LAST_ERR=/tmp/tmp.Nx1XiCpYcl ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.XqhVGtclUu ++ cat /tmp/tmp.Nx1XiCpYcl ++ rm /tmp/tmp.XqhVGtclUu /tmp/tmp.Nx1XiCpYcl ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Jtk387lnXo +++ mktemp ++ local LAST_ERR=/tmp/tmp.BXdiOvTE9n ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Jtk387lnXo ++ cat /tmp/tmp.BXdiOvTE9n ++ rm /tmp/tmp.Jtk387lnXo /tmp/tmp.BXdiOvTE9n ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness + wait_cluster_consistency some-name + local cluster_name=some-name + local wait_time=32 + retry=0 + sleep 7 + echo -n 'waiting for cluster readyness' waiting for cluster readyness++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.3d1FmdThm3 +++ mktemp ++ local LAST_ERR=/tmp/tmp.ae5lAoVFks ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.3d1FmdThm3 ++ cat /tmp/tmp.ae5lAoVFks ++ rm /tmp/tmp.3d1FmdThm3 /tmp/tmp.ae5lAoVFks ++ return 0 + [[ ready == \r\e\a\d\y ]] + echo .OK .OK + run_mongos 'use myApp\n db.test.insert({ x: 100502 })' myApp:myPass@some-name-mongos.expose-sharded-21694 + local 'command=use myApp\n db.test.insert({ x: 100502 })' + local uri=myApp:myPass@some-name-mongos.expose-sharded-21694 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local port=27017 + local mongo_bin=mongo ++ echo .svc.cluster.local ++ awk -F: '{print $2}' + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.4z41ZO7d5O +++ mktemp ++ local LAST_ERR=/tmp/tmp.Cl5oh97cOa ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.4z41ZO7d5O ++ cat /tmp/tmp.Cl5oh97cOa ++ rm /tmp/tmp.4z41ZO7d5O /tmp/tmp.Cl5oh97cOa ++ return 0 + local client_container=psmdb-client-66f577db5f-6lfsh + kubectl_bin exec psmdb-client-66f577db5f-6lfsh -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100502 })\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.expose-sharded-21694.svc.cluster.local:27017/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.bYKzxbsG6k ++ mktemp + local LAST_ERR=/tmp/tmp.Gt6CBZ0rN3 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-66f577db5f-6lfsh -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 
100502 })\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.expose-sharded-21694.svc.cluster.local:27017/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.bYKzxbsG6k Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-mongos.expose-sharded-21694.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb Implicit session: session { "id" : UUID("d279e1cf-f9b1-42cf-9b7b-c4784f48950c") } Percona Server for MongoDB server version: v7.0.24-13 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.Gt6CBZ0rN3 + rm /tmp/tmp.bYKzxbsG6k /tmp/tmp.Gt6CBZ0rN3 + return 0 + compare_mongos_cmd find myApp:myPass@some-name-mongos.expose-sharded-21694 -3nd + local command=find + local uri=myApp:myPass@some-name-mongos.expose-sharded-21694 + local postfix=-3nd + local suffix= + local database=myApp + local collection=test + local port=27017 + local tls=false + [[ false == \t\r\u\e ]] + mongos_command=run_mongos + run_mongos 'use myApp\n db.test.find()' myApp:myPass@some-name-mongos.expose-sharded-21694 mongodb '' '' 27017 + local 'command=use myApp\n db.test.find()' + egrep -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' + local uri=myApp:myPass@some-name-mongos.expose-sharded-21694 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local port=27017 + local mongo_bin=mongo + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ echo .svc.cluster.local ++ awk -F: '{print $2}' egrep: warning: egrep is obsolescent; using grep -E + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.5Xr9AFpX8V +++ mktemp ++ local LAST_ERR=/tmp/tmp.SbcUgwXjE9 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.5Xr9AFpX8V ++ cat /tmp/tmp.SbcUgwXjE9 ++ rm /tmp/tmp.5Xr9AFpX8V /tmp/tmp.SbcUgwXjE9 ++ return 0 + local client_container=psmdb-client-66f577db5f-6lfsh + kubectl_bin exec psmdb-client-66f577db5f-6lfsh -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.expose-sharded-21694.svc.cluster.local:27017/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.5ETaZTat7M ++ mktemp + local LAST_ERR=/tmp/tmp.sylFkQ4ZEs + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-66f577db5f-6lfsh -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.expose-sharded-21694.svc.cluster.local:27017/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.5ETaZTat7M + cat /tmp/tmp.sylFkQ4ZEs + rm /tmp/tmp.5ETaZTat7M /tmp/tmp.sylFkQ4ZEs + return 0 + [[ 0 -eq 0 ]] + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1993/e2e-tests/expose-sharded/compare/find-3nd.json /tmp/tmp.wBdHIypxeu/find-3nd + compare_mongo_cmd find myApp:myPass@some-name-rs0-0.some-name-rs0.expose-sharded-21694 -3nd + local command=find + local 
uri=myApp:myPass@some-name-rs0-0.some-name-rs0.expose-sharded-21694 + local postfix=-3nd + local suffix= + local database=myApp + local collection=test + local sort= + local tls=false + local 'full_command=db.test.find()' + [[ -n '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-10-06T21:04:15+0000] running db.test.find() in myApp + [[ false == \t\r\u\e ]] + mongo_command=run_mongo + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-0.some-name-rs0.expose-sharded-21694 mongodb '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-0.some-name-rs0.expose-sharded-21694 + local driver=mongodb + local suffix=.svc.cluster.local + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' +++ mktemp egrep: warning: egrep is obsolescent; using grep -E ++ local LAST_OUT=/tmp/tmp.A5SqnYoJI1 +++ mktemp ++ local LAST_ERR=/tmp/tmp.906ko2oGV0 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.A5SqnYoJI1 ++ cat /tmp/tmp.906ko2oGV0 ++ rm /tmp/tmp.A5SqnYoJI1 /tmp/tmp.906ko2oGV0 ++ return 0 + local client_container=psmdb-client-66f577db5f-6lfsh + local mongo_flag= + [[ myApp:myPass@some-name-rs0-0.some-name-rs0.expose-sharded-21694 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-66f577db5f-6lfsh -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-0.some-name-rs0.expose-sharded-21694.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.5K3kEzoveg ++ mktemp + local LAST_ERR=/tmp/tmp.wMSlj4rlJJ + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-66f577db5f-6lfsh -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-0.some-name-rs0.expose-sharded-21694.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.5K3kEzoveg + cat /tmp/tmp.wMSlj4rlJJ + rm /tmp/tmp.5K3kEzoveg /tmp/tmp.wMSlj4rlJJ + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1993/e2e-tests/expose-sharded/compare/find-3nd.json /tmp/tmp.wBdHIypxeu/find-3nd + compare_mongo_cmd find myApp:myPass@some-name-rs0-1.some-name-rs0.expose-sharded-21694 -3nd + local command=find + local uri=myApp:myPass@some-name-rs0-1.some-name-rs0.expose-sharded-21694 + local postfix=-3nd + local suffix= + local database=myApp + local collection=test + local sort= + local tls=false + local 'full_command=db.test.find()' + [[ -n '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-10-06T21:04:17+0000] running db.test.find() in myApp + [[ false == \t\r\u\e ]] + mongo_command=run_mongo + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-1.some-name-rs0.expose-sharded-21694 mongodb '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-1.some-name-rs0.expose-sharded-21694 + local driver=mongodb + 
local suffix=.svc.cluster.local + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp egrep: warning: egrep is obsolescent; using grep -E ++ local LAST_OUT=/tmp/tmp.Ci9S6KQxfP +++ mktemp ++ local LAST_ERR=/tmp/tmp.s6ygexUnJ5 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Ci9S6KQxfP ++ cat /tmp/tmp.s6ygexUnJ5 ++ rm /tmp/tmp.Ci9S6KQxfP /tmp/tmp.s6ygexUnJ5 ++ return 0 + local client_container=psmdb-client-66f577db5f-6lfsh + local mongo_flag= + [[ myApp:myPass@some-name-rs0-1.some-name-rs0.expose-sharded-21694 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-66f577db5f-6lfsh -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-1.some-name-rs0.expose-sharded-21694.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.3GHnB1ctSG ++ mktemp + local LAST_ERR=/tmp/tmp.1P1eAlq5ca + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-66f577db5f-6lfsh -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-1.some-name-rs0.expose-sharded-21694.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.3GHnB1ctSG + cat /tmp/tmp.1P1eAlq5ca + rm /tmp/tmp.3GHnB1ctSG /tmp/tmp.1P1eAlq5ca + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1993/e2e-tests/expose-sharded/compare/find-3nd.json /tmp/tmp.wBdHIypxeu/find-3nd + compare_mongo_cmd find myApp:myPass@some-name-rs0-2.some-name-rs0.expose-sharded-21694 -3nd + local command=find + local uri=myApp:myPass@some-name-rs0-2.some-name-rs0.expose-sharded-21694 + local postfix=-3nd + local suffix= + local database=myApp + local collection=test + local sort= + local tls=false + local 'full_command=db.test.find()' + [[ -n '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-10-06T21:04:19+0000] running db.test.find() in myApp + [[ false == \t\r\u\e ]] + mongo_command=run_mongo + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-2.some-name-rs0.expose-sharded-21694 mongodb '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-2.some-name-rs0.expose-sharded-21694 + local driver=mongodb + local suffix=.svc.cluster.local + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp egrep: warning: egrep is obsolescent; using grep -E ++ local LAST_OUT=/tmp/tmp.3GxmdTZJ7e +++ mktemp ++ local LAST_ERR=/tmp/tmp.sZLwcWMceO ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in 
$(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.3GxmdTZJ7e ++ cat /tmp/tmp.sZLwcWMceO ++ rm /tmp/tmp.3GxmdTZJ7e /tmp/tmp.sZLwcWMceO ++ return 0 + local client_container=psmdb-client-66f577db5f-6lfsh + local mongo_flag= + [[ myApp:myPass@some-name-rs0-2.some-name-rs0.expose-sharded-21694 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-66f577db5f-6lfsh -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-2.some-name-rs0.expose-sharded-21694.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.ggtVB295zt ++ mktemp + local LAST_ERR=/tmp/tmp.SRFQDxamyq + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-66f577db5f-6lfsh -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-2.some-name-rs0.expose-sharded-21694.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.ggtVB295zt + cat /tmp/tmp.SRFQDxamyq + rm /tmp/tmp.ggtVB295zt /tmp/tmp.SRFQDxamyq + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1993/e2e-tests/expose-sharded/compare/find-3nd.json /tmp/tmp.wBdHIypxeu/find-3nd + sleep 60 + desc 'Pause Exposed cluster (LoadBalancer)' + set +o xtrace ----------------------------------------------------------------------------------- Pause Exposed cluster (LoadBalancer) ----------------------------------------------------------------------------------- + stop_cluster some-name + local cluster_name=some-name + local max_wait_time=120 + local passed_time=0 + local sleep_time=1 + kubectl_bin patch psmdb some-name --type json '-p=[{"op":"add","path":"/spec/pause","value":true}]' ++ mktemp + local LAST_OUT=/tmp/tmp.Z49AvVPnaB ++ mktemp + local LAST_ERR=/tmp/tmp.hqacEPbmN0 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl patch psmdb some-name --type json '-p=[{"op":"add","path":"/spec/pause","value":true}]' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Z49AvVPnaB perconaservermongodb.psmdb.percona.com/some-name patched + cat /tmp/tmp.hqacEPbmN0 + rm /tmp/tmp.Z49AvVPnaB /tmp/tmp.hqacEPbmN0 + return 0 + set +x Waiting for cluster stop...............Error from server (NotFound): deployments.apps "some-name-mongos" not found Error from server (NotFound): deployments.apps "some-name-mongos" not found Error from server (NotFound): deployments.apps "some-name-mongos" not found Error from server (NotFound): deployments.apps "some-name-mongos" not found .Error from server (NotFound): deployments.apps "some-name-mongos" not found Error from server (NotFound): deployments.apps "some-name-mongos" not found Error from server (NotFound): deployments.apps "some-name-mongos" not found Error from server (NotFound): deployments.apps "some-name-mongos" not found .Error from server (NotFound): deployments.apps "some-name-mongos" not found Error from server (NotFound): deployments.apps "some-name-mongos" not found Error from server (NotFound): deployments.apps "some-name-mongos" not found Error from server (NotFound): deployments.apps "some-name-mongos" not found .Error from server (NotFound): deployments.apps "some-name-mongos" not found Error from server (NotFound): deployments.apps "some-name-mongos" not found 
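The burst of NotFound errors around this point is expected: stop_cluster has just set spec.pause to true on the CR, and its wait loop keeps polling the some-name-mongos Deployment, which the operator deletes as part of pausing, so every poll after that deletion prints this message until the remaining components are gone. The pause/resume pair itself is just two patches against the CR; a sketch using the names from this run:

# Sketch: pause the cluster; the operator scales the statefulsets down and removes the mongos Deployment.
kubectl patch psmdb some-name --type=json -p='[{"op":"add","path":"/spec/pause","value":true}]'
# Resume later; the operator recreates the components and the cluster passes back through
# the initializing state before reporting ready again.
kubectl patch psmdb some-name --type=json -p='[{"op":"add","path":"/spec/pause","value":false}]'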
Error from server (NotFound): deployments.apps "some-name-mongos" not found Error from server (NotFound): deployments.apps "some-name-mongos" not found .Error from server (NotFound): deployments.apps "some-name-mongos" not found Error from server (NotFound): deployments.apps "some-name-mongos" not found Error from server (NotFound): deployments.apps "some-name-mongos" not found Error from server (NotFound): deployments.apps "some-name-mongos" not found .Error from server (NotFound): deployments.apps "some-name-mongos" not found Error from server (NotFound): deployments.apps "some-name-mongos" not found Error from server (NotFound): deployments.apps "some-name-mongos" not found Error from server (NotFound): deployments.apps "some-name-mongos" not found .Error from server (NotFound): deployments.apps "some-name-mongos" not found Error from server (NotFound): deployments.apps "some-name-mongos" not found Error from server (NotFound): deployments.apps "some-name-mongos" not found Error from server (NotFound): deployments.apps "some-name-mongos" not found .Error from server (NotFound): deployments.apps "some-name-mongos" not found Error from server (NotFound): deployments.apps "some-name-mongos" not found Error from server (NotFound): deployments.apps "some-name-mongos" not found Error from server (NotFound): deployments.apps "some-name-mongos" not found .Error from server (NotFound): deployments.apps "some-name-mongos" not found Error from server (NotFound): deployments.apps "some-name-mongos" not found Error from server (NotFound): deployments.apps "some-name-mongos" not found Error from server (NotFound): deployments.apps "some-name-mongos" not found .Error from server (NotFound): deployments.apps "some-name-mongos" not found Error from server (NotFound): deployments.apps "some-name-mongos" not found Error from server (NotFound): deployments.apps "some-name-mongos" not found Error from server (NotFound): deployments.apps "some-name-mongos" not found .Error from server (NotFound): deployments.apps "some-name-mongos" not found Error from server (NotFound): deployments.apps "some-name-mongos" not found Error from server (NotFound): deployments.apps "some-name-mongos" not found Error from server (NotFound): deployments.apps "some-name-mongos" not found + start_cluster some-name + local cluster_name=some-name + kubectl_bin patch psmdb some-name --type json '-p=[{"op":"add","path":"/spec/pause","value":false}]' ++ mktemp + local LAST_OUT=/tmp/tmp.qiXGlq7KPU ++ mktemp + local LAST_ERR=/tmp/tmp.CZvy0cZMuo + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl patch psmdb some-name --type json '-p=[{"op":"add","path":"/spec/pause","value":false}]' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.qiXGlq7KPU perconaservermongodb.psmdb.percona.com/some-name patched + cat /tmp/tmp.CZvy0cZMuo + rm /tmp/tmp.qiXGlq7KPU /tmp/tmp.CZvy0cZMuo + return 0 + wait_cluster_consistency some-name + local cluster_name=some-name + local wait_time=32 + retry=0 + sleep 7 + echo -n 'waiting for cluster readyness' waiting for cluster readyness++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.0KCDZIoYDY +++ mktemp ++ local LAST_ERR=/tmp/tmp.iHSVr1NUvG ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.0KCDZIoYDY ++ cat 
/tmp/tmp.iHSVr1NUvG ++ rm /tmp/tmp.0KCDZIoYDY /tmp/tmp.iHSVr1NUvG ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 1 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.b0GhonbdcL +++ mktemp ++ local LAST_ERR=/tmp/tmp.QHnHDQmls5 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.b0GhonbdcL ++ cat /tmp/tmp.QHnHDQmls5 ++ rm /tmp/tmp.b0GhonbdcL /tmp/tmp.QHnHDQmls5 ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 2 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.xL0dNItfmZ +++ mktemp ++ local LAST_ERR=/tmp/tmp.jDkHc9qdE6 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.xL0dNItfmZ ++ cat /tmp/tmp.jDkHc9qdE6 ++ rm /tmp/tmp.xL0dNItfmZ /tmp/tmp.jDkHc9qdE6 ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 3 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.WBS4AViLrj +++ mktemp ++ local LAST_ERR=/tmp/tmp.AahbKunaWC ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.WBS4AViLrj ++ cat /tmp/tmp.AahbKunaWC ++ rm /tmp/tmp.WBS4AViLrj /tmp/tmp.AahbKunaWC ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 4 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.oeoXkKHxJ9 +++ mktemp ++ local LAST_ERR=/tmp/tmp.3PxgM7ehwv ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.oeoXkKHxJ9 ++ cat /tmp/tmp.3PxgM7ehwv ++ rm /tmp/tmp.oeoXkKHxJ9 /tmp/tmp.3PxgM7ehwv ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 5 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.o3zgcgItxz +++ mktemp ++ local LAST_ERR=/tmp/tmp.583jiG3huq ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.o3zgcgItxz ++ cat /tmp/tmp.583jiG3huq ++ rm /tmp/tmp.o3zgcgItxz /tmp/tmp.583jiG3huq ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 6 -ge 32 ']' + echo -n . 
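wait_cluster_consistency, which produces the dots in this stretch of the trace, is a plain polling loop over the CR status: it reads .status.state, retries while the value is still initializing, and gives up after 32 attempts. A roughly equivalent standalone loop, using the same CR name as this run:

# Sketch of the readiness poll: up to 32 attempts, 10 s apart, until .status.state == ready.
retry=0
until [[ "$(kubectl get psmdb some-name -o jsonpath='{.status.state}')" == "ready" ]]; do
  retry=$((retry + 1))
  if [[ $retry -ge 32 ]]; then
    echo "cluster did not reach ready state in time" >&2
    exit 1
  fi
  echo -n .
  sleep 10
done
echo .OK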
.+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.3neDdujIbm +++ mktemp ++ local LAST_ERR=/tmp/tmp.C7FOIl8JNJ ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.3neDdujIbm ++ cat /tmp/tmp.C7FOIl8JNJ ++ rm /tmp/tmp.3neDdujIbm /tmp/tmp.C7FOIl8JNJ ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 7 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.yTJMTRaXo5 +++ mktemp ++ local LAST_ERR=/tmp/tmp.TH2ef4XY5J ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.yTJMTRaXo5 ++ cat /tmp/tmp.TH2ef4XY5J ++ rm /tmp/tmp.yTJMTRaXo5 /tmp/tmp.TH2ef4XY5J ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 8 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.H3GkJ5fr4F +++ mktemp ++ local LAST_ERR=/tmp/tmp.4b2rFzkEzU ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.H3GkJ5fr4F ++ cat /tmp/tmp.4b2rFzkEzU ++ rm /tmp/tmp.H3GkJ5fr4F /tmp/tmp.4b2rFzkEzU ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 9 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Z54ZVjKfIh +++ mktemp ++ local LAST_ERR=/tmp/tmp.3IPz2deotV ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Z54ZVjKfIh ++ cat /tmp/tmp.3IPz2deotV ++ rm /tmp/tmp.Z54ZVjKfIh /tmp/tmp.3IPz2deotV ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 10 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Po4qxyQuSa +++ mktemp ++ local LAST_ERR=/tmp/tmp.bh73jL9ncB ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Po4qxyQuSa ++ cat /tmp/tmp.bh73jL9ncB ++ rm /tmp/tmp.Po4qxyQuSa /tmp/tmp.bh73jL9ncB ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 11 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.xknNYx1RuR +++ mktemp ++ local LAST_ERR=/tmp/tmp.it1Hs54FY9 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.xknNYx1RuR ++ cat /tmp/tmp.it1Hs54FY9 ++ rm /tmp/tmp.xknNYx1RuR /tmp/tmp.it1Hs54FY9 ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 12 -ge 32 ']' + echo -n . 
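The trace around this point is wait_cluster_consistency polling the PerconaServerMongoDB resource after the pause flag was removed. Condensed into a standalone sketch (cluster name, the 32-retry limit and the 10-second sleep are taken from this run; error handling is simplified compared to the test helpers):

    # Unpause the cluster, then poll .status.state until it reports ready.
    kubectl patch psmdb some-name --type=json \
      -p='[{"op":"add","path":"/spec/pause","value":false}]'

    retry=0
    until [[ "$(kubectl get psmdb some-name -o jsonpath='{.status.state}')" == "ready" ]]; do
      retry=$((retry + 1))
      if [[ ${retry} -ge 32 ]]; then
        echo "cluster some-name did not reach ready state in time" >&2
        exit 1
      fi
      echo -n .
      sleep 10
    done
    echo .OK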
.+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Wn3ultCyBT +++ mktemp ++ local LAST_ERR=/tmp/tmp.9zbrbMR33S ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Wn3ultCyBT ++ cat /tmp/tmp.9zbrbMR33S ++ rm /tmp/tmp.Wn3ultCyBT /tmp/tmp.9zbrbMR33S ++ return 0 + [[ ready == \r\e\a\d\y ]] + echo .OK .OK + run_mongos 'use myApp\n db.test.insert({ x: 100503 })' myApp:myPass@some-name-mongos.expose-sharded-21694 + local 'command=use myApp\n db.test.insert({ x: 100503 })' + local uri=myApp:myPass@some-name-mongos.expose-sharded-21694 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local port=27017 + local mongo_bin=mongo ++ echo .svc.cluster.local ++ awk -F: '{print $2}' + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.pscJjFoTRF +++ mktemp ++ local LAST_ERR=/tmp/tmp.PzeuWOKr2y ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.pscJjFoTRF ++ cat /tmp/tmp.PzeuWOKr2y ++ rm /tmp/tmp.pscJjFoTRF /tmp/tmp.PzeuWOKr2y ++ return 0 + local client_container=psmdb-client-66f577db5f-6lfsh + kubectl_bin exec psmdb-client-66f577db5f-6lfsh -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100503 })\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.expose-sharded-21694.svc.cluster.local:27017/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.1gLQvSDg5Z ++ mktemp + local LAST_ERR=/tmp/tmp.EzfTz2ITp2 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-66f577db5f-6lfsh -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100503 })\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.expose-sharded-21694.svc.cluster.local:27017/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.1gLQvSDg5Z Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-mongos.expose-sharded-21694.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb Implicit session: session { "id" : UUID("2ff9809e-add4-4243-a6db-df761f14a982") } Percona Server for MongoDB server version: v7.0.24-13 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.EzfTz2ITp2 + rm /tmp/tmp.1gLQvSDg5Z /tmp/tmp.EzfTz2ITp2 + return 0 + compare_mongos_cmd find myApp:myPass@some-name-mongos.expose-sharded-21694 -4nd + local command=find + local uri=myApp:myPass@some-name-mongos.expose-sharded-21694 + local postfix=-4nd + local suffix= + local database=myApp + local collection=test + local port=27017 + local tls=false + [[ false == \t\r\u\e ]] + mongos_command=run_mongos + run_mongos 'use myApp\n db.test.find()' myApp:myPass@some-name-mongos.expose-sharded-21694 mongodb '' '' 27017 + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-mongos.expose-sharded-21694 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local port=27017 + local mongo_bin=mongo + egrep -v 'I NETWORK|W NETWORK|Error saving history 
file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ echo .svc.cluster.local ++ awk -F: '{print $2}' egrep: warning: egrep is obsolescent; using grep -E + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.6gjDcjoTqp +++ mktemp ++ local LAST_ERR=/tmp/tmp.LXjUZZjH4E ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.6gjDcjoTqp ++ cat /tmp/tmp.LXjUZZjH4E ++ rm /tmp/tmp.6gjDcjoTqp /tmp/tmp.LXjUZZjH4E ++ return 0 + local client_container=psmdb-client-66f577db5f-6lfsh + kubectl_bin exec psmdb-client-66f577db5f-6lfsh -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.expose-sharded-21694.svc.cluster.local:27017/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.IhURz5Kh47 ++ mktemp + local LAST_ERR=/tmp/tmp.8KVwFJrCu5 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-66f577db5f-6lfsh -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.expose-sharded-21694.svc.cluster.local:27017/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.IhURz5Kh47 + cat /tmp/tmp.8KVwFJrCu5 + rm /tmp/tmp.IhURz5Kh47 /tmp/tmp.8KVwFJrCu5 + return 0 + [[ 0 -eq 0 ]] + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1993/e2e-tests/expose-sharded/compare/find-4nd.json /tmp/tmp.wBdHIypxeu/find-4nd + compare_mongo_cmd find myApp:myPass@some-name-rs0-0.some-name-rs0.expose-sharded-21694 -4nd + local command=find + local uri=myApp:myPass@some-name-rs0-0.some-name-rs0.expose-sharded-21694 + local postfix=-4nd + local suffix= + local database=myApp + local collection=test + local sort= + local tls=false + local 'full_command=db.test.find()' + [[ -n '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-10-06T21:16:50+0000] running db.test.find() in myApp + [[ false == \t\r\u\e ]] + mongo_command=run_mongo + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-0.some-name-rs0.expose-sharded-21694 mongodb '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-0.some-name-rs0.expose-sharded-21694 + local driver=mongodb + local suffix=.svc.cluster.local + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp egrep: warning: egrep is obsolescent; using grep -E ++ local LAST_OUT=/tmp/tmp.v6p1u5cpk2 +++ mktemp ++ local LAST_ERR=/tmp/tmp.RUQ8JVzMWH ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ 
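The run_mongos/compare_mongos_cmd pair in this part of the trace writes a document through mongos from the psmdb-client pod, reads it back, strips volatile shell output, and diffs the result against a stored expectation. A minimal sketch of that cycle, assuming the same client pod, credentials and compare file as this run; the fixed /tmp/find-4nd path below stands in for the mktemp directory the real test uses:

    # Write through mongos from the client pod.
    kubectl exec psmdb-client-66f577db5f-6lfsh -- bash -c \
      'printf "use myApp\n db.test.insert({ x: 100503 })\n" | mongo mongodb://myApp:myPass@some-name-mongos.expose-sharded-21694.svc.cluster.local:27017/admin'

    # Read back, drop shell noise, scrub ObjectIds and namespace-specific suffixes,
    # then compare against the expected output for this step.
    kubectl exec psmdb-client-66f577db5f-6lfsh -- bash -c \
      'printf "use myApp\n db.test.find()\n" | mongo mongodb://myApp:myPass@some-name-mongos.expose-sharded-21694.svc.cluster.local:27017/admin' \
      | grep -Ev 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Implicit session:|versions do not match' \
      | sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' \
      > /tmp/find-4nd
    diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1993/e2e-tests/expose-sharded/compare/find-4nd.json /tmp/find-4nd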
break ++ cat /tmp/tmp.v6p1u5cpk2 ++ cat /tmp/tmp.RUQ8JVzMWH ++ rm /tmp/tmp.v6p1u5cpk2 /tmp/tmp.RUQ8JVzMWH ++ return 0 + local client_container=psmdb-client-66f577db5f-6lfsh + local mongo_flag= + [[ myApp:myPass@some-name-rs0-0.some-name-rs0.expose-sharded-21694 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-66f577db5f-6lfsh -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-0.some-name-rs0.expose-sharded-21694.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.ipttf4QwT5 ++ mktemp + local LAST_ERR=/tmp/tmp.yIgxzIqqbk + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-66f577db5f-6lfsh -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-0.some-name-rs0.expose-sharded-21694.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.ipttf4QwT5 + cat /tmp/tmp.yIgxzIqqbk + rm /tmp/tmp.ipttf4QwT5 /tmp/tmp.yIgxzIqqbk + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1993/e2e-tests/expose-sharded/compare/find-4nd.json /tmp/tmp.wBdHIypxeu/find-4nd + compare_mongo_cmd find myApp:myPass@some-name-rs0-1.some-name-rs0.expose-sharded-21694 -4nd + local command=find + local uri=myApp:myPass@some-name-rs0-1.some-name-rs0.expose-sharded-21694 + local postfix=-4nd + local suffix= + local database=myApp + local collection=test + local sort= + local tls=false + local 'full_command=db.test.find()' + [[ -n '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-10-06T21:16:52+0000] running db.test.find() in myApp + [[ false == \t\r\u\e ]] + mongo_command=run_mongo + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-1.some-name-rs0.expose-sharded-21694 mongodb '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-1.some-name-rs0.expose-sharded-21694 + local driver=mongodb + local suffix=.svc.cluster.local + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp egrep: warning: egrep is obsolescent; using grep -E ++ local LAST_OUT=/tmp/tmp.rIzWDKKSVp +++ mktemp ++ local LAST_ERR=/tmp/tmp.gokQTk0NQB ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.rIzWDKKSVp ++ cat /tmp/tmp.gokQTk0NQB ++ rm /tmp/tmp.rIzWDKKSVp /tmp/tmp.gokQTk0NQB ++ return 0 + local client_container=psmdb-client-66f577db5f-6lfsh + local mongo_flag= + [[ myApp:myPass@some-name-rs0-1.some-name-rs0.expose-sharded-21694 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-66f577db5f-6lfsh -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-1.some-name-rs0.expose-sharded-21694.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.9ILkwpoIYt ++ mktemp + local LAST_ERR=/tmp/tmp.HQ5nVTwT9V + local exit_status=0 + local timeout=4 ++ 
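compare_mongo_cmd then repeats the same read against each rs0 member individually, connecting straight to the member service with ssl=false and the replica-set name. One such direct check, with the member, pod and namespace names taken from this run:

    # Direct read against a single replica-set member (rs0-1 here), as the test
    # does for every member after each expose change.
    kubectl exec psmdb-client-66f577db5f-6lfsh -- bash -c \
      'printf "use myApp\n db.test.find()\n" | mongo "mongodb://myApp:myPass@some-name-rs0-1.some-name-rs0.expose-sharded-21694.svc.cluster.local/admin?ssl=false&replicaSet=rs0"'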
seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-66f577db5f-6lfsh -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-1.some-name-rs0.expose-sharded-21694.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.9ILkwpoIYt + cat /tmp/tmp.HQ5nVTwT9V + rm /tmp/tmp.9ILkwpoIYt /tmp/tmp.HQ5nVTwT9V + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1993/e2e-tests/expose-sharded/compare/find-4nd.json /tmp/tmp.wBdHIypxeu/find-4nd + compare_mongo_cmd find myApp:myPass@some-name-rs0-2.some-name-rs0.expose-sharded-21694 -4nd + local command=find + local uri=myApp:myPass@some-name-rs0-2.some-name-rs0.expose-sharded-21694 + local postfix=-4nd + local suffix= + local database=myApp + local collection=test + local sort= + local tls=false + local 'full_command=db.test.find()' + [[ -n '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-10-06T21:16:55+0000] running db.test.find() in myApp + [[ false == \t\r\u\e ]] + mongo_command=run_mongo + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-2.some-name-rs0.expose-sharded-21694 mongodb '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-2.some-name-rs0.expose-sharded-21694 + local driver=mongodb + local suffix=.svc.cluster.local + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Mf3rZBLNtU egrep: warning: egrep is obsolescent; using grep -E +++ mktemp ++ local LAST_ERR=/tmp/tmp.8PEhlgQX33 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Mf3rZBLNtU ++ cat /tmp/tmp.8PEhlgQX33 ++ rm /tmp/tmp.Mf3rZBLNtU /tmp/tmp.8PEhlgQX33 ++ return 0 + local client_container=psmdb-client-66f577db5f-6lfsh + local mongo_flag= + [[ myApp:myPass@some-name-rs0-2.some-name-rs0.expose-sharded-21694 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-66f577db5f-6lfsh -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-2.some-name-rs0.expose-sharded-21694.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.8t1EeT3veL ++ mktemp + local LAST_ERR=/tmp/tmp.y4ixyvPXNw + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-66f577db5f-6lfsh -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-2.some-name-rs0.expose-sharded-21694.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.8t1EeT3veL + cat /tmp/tmp.y4ixyvPXNw + rm /tmp/tmp.8t1EeT3veL /tmp/tmp.y4ixyvPXNw + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1993/e2e-tests/expose-sharded/compare/find-4nd.json /tmp/tmp.wBdHIypxeu/find-4nd + desc 'Exposed, LoadBalancer -> ClusterIP' + set +o xtrace 
----------------------------------------------------------------------------------- Exposed, LoadBalancer -> ClusterIP ----------------------------------------------------------------------------------- + expose_cluster ClusterIP + expose_type=ClusterIP + expose_status=true + kubectl_bin patch psmdb some-name --type=json --patch '[ { "op": "replace", "path": "/spec/replsets/0/expose", "value": { "enabled": true, "type" : "ClusterIP" } }, { "op": "replace", "path": "/spec/sharding/mongos/expose", "value": { "type" : "ClusterIP" } }, { "op": "replace", "path": "/spec/sharding/configsvrReplSet/expose", "value": { "enabled": true, "type" : "ClusterIP" } }]' ++ mktemp + local LAST_OUT=/tmp/tmp.l1IflPfyB2 ++ mktemp + local LAST_ERR=/tmp/tmp.aSsMwfHQoa + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl patch psmdb some-name --type=json --patch '[ { "op": "replace", "path": "/spec/replsets/0/expose", "value": { "enabled": true, "type" : "ClusterIP" } }, { "op": "replace", "path": "/spec/sharding/mongos/expose", "value": { "type" : "ClusterIP" } }, { "op": "replace", "path": "/spec/sharding/configsvrReplSet/expose", "value": { "enabled": true, "type" : "ClusterIP" } }]' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.l1IflPfyB2 perconaservermongodb.psmdb.percona.com/some-name patched + cat /tmp/tmp.aSsMwfHQoa + rm /tmp/tmp.l1IflPfyB2 /tmp/tmp.aSsMwfHQoa + return 0 + wait_for_running some-name-rs0 3 + local name=some-name-rs0 + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=some-name ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod some-name-rs0-0 + local pod=some-name-rs0-0 + set +o xtrace waiting for pod/some-name-rs0-0 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod some-name-rs0-1 + local pod=some-name-rs0-1 + set +o xtrace waiting for pod/some-name-rs0-1 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.vNXCS92qAx +++ mktemp ++ local LAST_ERR=/tmp/tmp.h1TKk8zxjv ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.vNXCS92qAx ++ cat /tmp/tmp.h1TKk8zxjv ++ rm /tmp/tmp.vNXCS92qAx /tmp/tmp.h1TKk8zxjv ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-rs0-2 + local pod=some-name-rs0-2 + set +o xtrace waiting for pod/some-name-rs0-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.T2mZpPPTCQ +++ mktemp ++ local LAST_ERR=/tmp/tmp.GrXGD461NO ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.T2mZpPPTCQ ++ cat /tmp/tmp.GrXGD461NO ++ rm /tmp/tmp.T2mZpPPTCQ /tmp/tmp.GrXGD461NO ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.KkQaylKmPF +++ mktemp ++ local LAST_ERR=/tmp/tmp.81Smr0HugE ++ local exit_status=0 ++ local 
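The expose_cluster helper recorded above switches all three expose sections of the custom resource in a single JSON patch; the same patch reformatted for readability (content unchanged from the trace):

    kubectl patch psmdb some-name --type=json --patch '[
      { "op": "replace", "path": "/spec/replsets/0/expose",
        "value": { "enabled": true, "type": "ClusterIP" } },
      { "op": "replace", "path": "/spec/sharding/mongos/expose",
        "value": { "type": "ClusterIP" } },
      { "op": "replace", "path": "/spec/sharding/configsvrReplSet/expose",
        "value": { "enabled": true, "type": "ClusterIP" } }
    ]'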
timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.KkQaylKmPF ++ cat /tmp/tmp.81Smr0HugE ++ rm /tmp/tmp.KkQaylKmPF /tmp/tmp.81Smr0HugE ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness + wait_for_running some-name-cfg 3 false + local name=some-name-cfg + let last_pod=2 + local check_cluster_readyness=false + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=cfg + local cluster_name=some-name ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod some-name-cfg-0 + local pod=some-name-cfg-0 + set +o xtrace waiting for pod/some-name-cfg-0 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod some-name-cfg-1 + local pod=some-name-cfg-1 + set +o xtrace waiting for pod/some-name-cfg-1 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Tmo3xEO8Ra +++ mktemp ++ local LAST_ERR=/tmp/tmp.9EGWkEXBKb ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Tmo3xEO8Ra ++ cat /tmp/tmp.9EGWkEXBKb ++ rm /tmp/tmp.Tmo3xEO8Ra /tmp/tmp.9EGWkEXBKb ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-cfg-2 + local pod=some-name-cfg-2 + set +o xtrace waiting for pod/some-name-cfg-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.nGaqUGfxzw +++ mktemp ++ local LAST_ERR=/tmp/tmp.7c5SxLvbne ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.nGaqUGfxzw ++ cat /tmp/tmp.7c5SxLvbne ++ rm /tmp/tmp.nGaqUGfxzw /tmp/tmp.7c5SxLvbne ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.7kOTiheKqD +++ mktemp ++ local LAST_ERR=/tmp/tmp.pmekNdd7bi ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.7kOTiheKqD ++ cat /tmp/tmp.pmekNdd7bi ++ rm /tmp/tmp.7kOTiheKqD /tmp/tmp.pmekNdd7bi ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ false == \t\r\u\e ]] + wait_for_running some-name-mongos 3 + local name=some-name-mongos + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=mongos + local cluster_name=some-name ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod some-name-mongos-0 + local pod=some-name-mongos-0 + set +o xtrace waiting for pod/some-name-mongos-0 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod some-name-mongos-1 + local pod=some-name-mongos-1 + set +o xtrace waiting for pod/some-name-mongos-1 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ 
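wait_for_running, seen here for rs0 and cfg, waits on each pod of the set and then uses filtered jsonpath lookups on the custom resource to decide whether arbiter, non-voting or hidden members also need to be waited on; an empty result means that member type is not configured, so only the three regular pods are checked. The lookups it issues look like this (cluster and replica-set names from this run):

    # Filtered jsonpath lookups used by wait_for_running for the rs0 replica set.
    kubectl get psmdb some-name \
      -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}'
    kubectl get psmdb some-name \
      -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}'
    kubectl get psmdb some-name \
      -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}'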
kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.fWNdj9cqUe +++ mktemp ++ local LAST_ERR=/tmp/tmp.PfIRNP2pfl ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.fWNdj9cqUe ++ cat /tmp/tmp.PfIRNP2pfl ++ rm /tmp/tmp.fWNdj9cqUe /tmp/tmp.PfIRNP2pfl ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-mongos-2 + local pod=some-name-mongos-2 + set +o xtrace waiting for pod/some-name-mongos-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.CSgnwkTs80 +++ mktemp ++ local LAST_ERR=/tmp/tmp.DsYrSb75Q3 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.CSgnwkTs80 ++ cat /tmp/tmp.DsYrSb75Q3 ++ rm /tmp/tmp.CSgnwkTs80 /tmp/tmp.DsYrSb75Q3 ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Ucuy5MWSer +++ mktemp ++ local LAST_ERR=/tmp/tmp.yrrcc8opVp ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Ucuy5MWSer ++ cat /tmp/tmp.yrrcc8opVp ++ rm /tmp/tmp.Ucuy5MWSer /tmp/tmp.yrrcc8opVp ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness + wait_cluster_consistency some-name + local cluster_name=some-name + local wait_time=32 + retry=0 + sleep 7 + echo -n 'waiting for cluster readyness' waiting for cluster readyness++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.KHVk29EzgX +++ mktemp ++ local LAST_ERR=/tmp/tmp.4opEtbW9G0 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.KHVk29EzgX ++ cat /tmp/tmp.4opEtbW9G0 ++ rm /tmp/tmp.KHVk29EzgX /tmp/tmp.4opEtbW9G0 ++ return 0 + [[ ready == \r\e\a\d\y ]] + echo .OK .OK + run_mongos 'use myApp\n db.test.insert({ x: 100504 })' myApp:myPass@some-name-mongos.expose-sharded-21694 + local 'command=use myApp\n db.test.insert({ x: 100504 })' + local uri=myApp:myPass@some-name-mongos.expose-sharded-21694 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local port=27017 + local mongo_bin=mongo ++ echo .svc.cluster.local ++ awk -F: '{print $2}' + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.2r0SAdpivr +++ mktemp ++ local LAST_ERR=/tmp/tmp.6LmaAvOwzd ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' 
++ break ++ cat /tmp/tmp.2r0SAdpivr ++ cat /tmp/tmp.6LmaAvOwzd ++ rm /tmp/tmp.2r0SAdpivr /tmp/tmp.6LmaAvOwzd ++ return 0 + local client_container=psmdb-client-66f577db5f-6lfsh + kubectl_bin exec psmdb-client-66f577db5f-6lfsh -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100504 })\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.expose-sharded-21694.svc.cluster.local:27017/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.bts1Y8EEqi ++ mktemp + local LAST_ERR=/tmp/tmp.iOvyITMPg5 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-66f577db5f-6lfsh -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100504 })\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.expose-sharded-21694.svc.cluster.local:27017/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.bts1Y8EEqi Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-mongos.expose-sharded-21694.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb Implicit session: session { "id" : UUID("2c252dbe-4d8e-4aee-be97-a8ad1cb2b985") } Percona Server for MongoDB server version: v7.0.24-13 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.iOvyITMPg5 + rm /tmp/tmp.bts1Y8EEqi /tmp/tmp.iOvyITMPg5 + return 0 + compare_mongos_cmd find myApp:myPass@some-name-mongos.expose-sharded-21694 -5nd + local command=find + local uri=myApp:myPass@some-name-mongos.expose-sharded-21694 + local postfix=-5nd + local suffix= + local database=myApp + local collection=test + local port=27017 + local tls=false + [[ false == \t\r\u\e ]] + mongos_command=run_mongos + run_mongos 'use myApp\n db.test.find()' myApp:myPass@some-name-mongos.expose-sharded-21694 mongodb '' '' 27017 + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-mongos.expose-sharded-21694 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local port=27017 + local mongo_bin=mongo + egrep -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ echo .svc.cluster.local ++ awk -F: '{print $2}' egrep: warning: egrep is obsolescent; using grep -E + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.TCwINsFMVH +++ mktemp ++ local LAST_ERR=/tmp/tmp.gcmu6aiWkU ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.TCwINsFMVH ++ cat /tmp/tmp.gcmu6aiWkU ++ rm /tmp/tmp.TCwINsFMVH /tmp/tmp.gcmu6aiWkU ++ return 0 + local client_container=psmdb-client-66f577db5f-6lfsh + kubectl_bin exec psmdb-client-66f577db5f-6lfsh -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.expose-sharded-21694.svc.cluster.local:27017/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.PxUYiEGZMB ++ mktemp + local LAST_ERR=/tmp/tmp.T7p0W9GV7c + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec 
psmdb-client-66f577db5f-6lfsh -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.expose-sharded-21694.svc.cluster.local:27017/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.PxUYiEGZMB + cat /tmp/tmp.T7p0W9GV7c + rm /tmp/tmp.PxUYiEGZMB /tmp/tmp.T7p0W9GV7c + return 0 + [[ 0 -eq 0 ]] + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1993/e2e-tests/expose-sharded/compare/find-5nd.json /tmp/tmp.wBdHIypxeu/find-5nd + compare_mongo_cmd find myApp:myPass@some-name-rs0-0.some-name-rs0.expose-sharded-21694 -5nd + local command=find + local uri=myApp:myPass@some-name-rs0-0.some-name-rs0.expose-sharded-21694 + local postfix=-5nd + local suffix= + local database=myApp + local collection=test + local sort= + local tls=false + local 'full_command=db.test.find()' + [[ -n '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-10-06T21:18:00+0000] running db.test.find() in myApp + [[ false == \t\r\u\e ]] + mongo_command=run_mongo + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-0.some-name-rs0.expose-sharded-21694 mongodb '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-0.some-name-rs0.expose-sharded-21694 + local driver=mongodb + local suffix=.svc.cluster.local + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp egrep: warning: egrep is obsolescent; using grep -E ++ local LAST_OUT=/tmp/tmp.uac5iy7Vjd +++ mktemp ++ local LAST_ERR=/tmp/tmp.fFTMsVuLj5 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.uac5iy7Vjd ++ cat /tmp/tmp.fFTMsVuLj5 ++ rm /tmp/tmp.uac5iy7Vjd /tmp/tmp.fFTMsVuLj5 ++ return 0 + local client_container=psmdb-client-66f577db5f-6lfsh + local mongo_flag= + [[ myApp:myPass@some-name-rs0-0.some-name-rs0.expose-sharded-21694 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-66f577db5f-6lfsh -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-0.some-name-rs0.expose-sharded-21694.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.GIUrbh8h4x ++ mktemp + local LAST_ERR=/tmp/tmp.cemWavIv4Z + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-66f577db5f-6lfsh -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-0.some-name-rs0.expose-sharded-21694.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.GIUrbh8h4x + cat /tmp/tmp.cemWavIv4Z + rm /tmp/tmp.GIUrbh8h4x /tmp/tmp.cemWavIv4Z + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1993/e2e-tests/expose-sharded/compare/find-5nd.json /tmp/tmp.wBdHIypxeu/find-5nd + compare_mongo_cmd find myApp:myPass@some-name-rs0-1.some-name-rs0.expose-sharded-21694 -5nd + local command=find + local 
uri=myApp:myPass@some-name-rs0-1.some-name-rs0.expose-sharded-21694 + local postfix=-5nd + local suffix= + local database=myApp + local collection=test + local sort= + local tls=false + local 'full_command=db.test.find()' + [[ -n '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-10-06T21:18:02+0000] running db.test.find() in myApp + [[ false == \t\r\u\e ]] + mongo_command=run_mongo + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-1.some-name-rs0.expose-sharded-21694 mongodb '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-1.some-name-rs0.expose-sharded-21694 + local driver=mongodb + local suffix=.svc.cluster.local + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp egrep: warning: egrep is obsolescent; using grep -E ++ local LAST_OUT=/tmp/tmp.hJo6Wga6Ls +++ mktemp ++ local LAST_ERR=/tmp/tmp.Cz6shgJRze ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.hJo6Wga6Ls ++ cat /tmp/tmp.Cz6shgJRze ++ rm /tmp/tmp.hJo6Wga6Ls /tmp/tmp.Cz6shgJRze ++ return 0 + local client_container=psmdb-client-66f577db5f-6lfsh + local mongo_flag= + [[ myApp:myPass@some-name-rs0-1.some-name-rs0.expose-sharded-21694 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-66f577db5f-6lfsh -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-1.some-name-rs0.expose-sharded-21694.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.jUBzMRvyU9 ++ mktemp + local LAST_ERR=/tmp/tmp.FRavW1gNgD + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-66f577db5f-6lfsh -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-1.some-name-rs0.expose-sharded-21694.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.jUBzMRvyU9 + cat /tmp/tmp.FRavW1gNgD + rm /tmp/tmp.jUBzMRvyU9 /tmp/tmp.FRavW1gNgD + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1993/e2e-tests/expose-sharded/compare/find-5nd.json /tmp/tmp.wBdHIypxeu/find-5nd + compare_mongo_cmd find myApp:myPass@some-name-rs0-2.some-name-rs0.expose-sharded-21694 -5nd + local command=find + local uri=myApp:myPass@some-name-rs0-2.some-name-rs0.expose-sharded-21694 + local postfix=-5nd + local suffix= + local database=myApp + local collection=test + local sort= + local tls=false + local 'full_command=db.test.find()' + [[ -n '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-10-06T21:18:05+0000] running db.test.find() in myApp + [[ false == \t\r\u\e ]] + mongo_command=run_mongo + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-2.some-name-rs0.expose-sharded-21694 mongodb '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-2.some-name-rs0.expose-sharded-21694 + local driver=mongodb + 
local suffix=.svc.cluster.local + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp egrep: warning: egrep is obsolescent; using grep -E ++ local LAST_OUT=/tmp/tmp.fyBBiwgnvs +++ mktemp ++ local LAST_ERR=/tmp/tmp.LFmy9H1sBA ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.fyBBiwgnvs ++ cat /tmp/tmp.LFmy9H1sBA ++ rm /tmp/tmp.fyBBiwgnvs /tmp/tmp.LFmy9H1sBA ++ return 0 + local client_container=psmdb-client-66f577db5f-6lfsh + local mongo_flag= + [[ myApp:myPass@some-name-rs0-2.some-name-rs0.expose-sharded-21694 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-66f577db5f-6lfsh -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-2.some-name-rs0.expose-sharded-21694.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.yynYElVICZ ++ mktemp + local LAST_ERR=/tmp/tmp.bwpAY8vNpq + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-66f577db5f-6lfsh -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-2.some-name-rs0.expose-sharded-21694.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.yynYElVICZ + cat /tmp/tmp.bwpAY8vNpq + rm /tmp/tmp.yynYElVICZ /tmp/tmp.bwpAY8vNpq + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1993/e2e-tests/expose-sharded/compare/find-5nd.json /tmp/tmp.wBdHIypxeu/find-5nd + desc 'Exposed -> Unexposed' + set +o xtrace ----------------------------------------------------------------------------------- Exposed -> Unexposed ----------------------------------------------------------------------------------- + expose_cluster ClusterIP false + expose_type=ClusterIP + expose_status=false + kubectl_bin patch psmdb some-name --type=json --patch '[ { "op": "replace", "path": "/spec/replsets/0/expose", "value": { "enabled": false, "type" : "ClusterIP" } }, { "op": "replace", "path": "/spec/sharding/mongos/expose", "value": { "type" : "ClusterIP" } }, { "op": "replace", "path": "/spec/sharding/configsvrReplSet/expose", "value": { "enabled": false, "type" : "ClusterIP" } }]' ++ mktemp + local LAST_OUT=/tmp/tmp.CFTmD6aSsl ++ mktemp + local LAST_ERR=/tmp/tmp.36R2PKg4vl + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl patch psmdb some-name --type=json --patch '[ { "op": "replace", "path": "/spec/replsets/0/expose", "value": { "enabled": false, "type" : "ClusterIP" } }, { "op": "replace", "path": "/spec/sharding/mongos/expose", "value": { "type" : "ClusterIP" } }, { "op": "replace", "path": "/spec/sharding/configsvrReplSet/expose", "value": { "enabled": false, "type" : "ClusterIP" } }]' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.CFTmD6aSsl perconaservermongodb.psmdb.percona.com/some-name patched + cat /tmp/tmp.36R2PKg4vl + rm 
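For the reverse transition the helper sets enabled to false for the replica set and the config-server replica set, while the mongos expose block again carries only a type, matching the patch recorded in the trace above; reformatted for readability:

    kubectl patch psmdb some-name --type=json --patch '[
      { "op": "replace", "path": "/spec/replsets/0/expose",
        "value": { "enabled": false, "type": "ClusterIP" } },
      { "op": "replace", "path": "/spec/sharding/mongos/expose",
        "value": { "type": "ClusterIP" } },
      { "op": "replace", "path": "/spec/sharding/configsvrReplSet/expose",
        "value": { "enabled": false, "type": "ClusterIP" } }
    ]'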
/tmp/tmp.CFTmD6aSsl /tmp/tmp.36R2PKg4vl + return 0 + wait_for_running some-name-rs0 3 + local name=some-name-rs0 + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=some-name ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod some-name-rs0-0 + local pod=some-name-rs0-0 + set +o xtrace waiting for pod/some-name-rs0-0 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod some-name-rs0-1 + local pod=some-name-rs0-1 + set +o xtrace waiting for pod/some-name-rs0-1 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ypHW1i4yKB +++ mktemp ++ local LAST_ERR=/tmp/tmp.RGLHSX6Atp ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.ypHW1i4yKB ++ cat /tmp/tmp.RGLHSX6Atp ++ rm /tmp/tmp.ypHW1i4yKB /tmp/tmp.RGLHSX6Atp ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-rs0-2 + local pod=some-name-rs0-2 + set +o xtrace waiting for pod/some-name-rs0-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.l8leJTsW8W +++ mktemp ++ local LAST_ERR=/tmp/tmp.oEqd4MmapM ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.l8leJTsW8W ++ cat /tmp/tmp.oEqd4MmapM ++ rm /tmp/tmp.l8leJTsW8W /tmp/tmp.oEqd4MmapM ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ZQljFkk6Ou +++ mktemp ++ local LAST_ERR=/tmp/tmp.rMxvper852 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.ZQljFkk6Ou ++ cat /tmp/tmp.rMxvper852 ++ rm /tmp/tmp.ZQljFkk6Ou /tmp/tmp.rMxvper852 ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness + wait_for_running some-name-cfg 3 false + local name=some-name-cfg + let last_pod=2 + local check_cluster_readyness=false + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=cfg + local cluster_name=some-name ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod some-name-cfg-0 + local pod=some-name-cfg-0 + set +o xtrace waiting for pod/some-name-cfg-0 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod some-name-cfg-1 + local pod=some-name-cfg-1 + set +o xtrace waiting for pod/some-name-cfg-1 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.P3zSCQW0qw +++ mktemp ++ local LAST_ERR=/tmp/tmp.UkPwKGNe65 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 
'jsonpath={.spec.replsets[?(@.name=="cfg")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.P3zSCQW0qw ++ cat /tmp/tmp.UkPwKGNe65 ++ rm /tmp/tmp.P3zSCQW0qw /tmp/tmp.UkPwKGNe65 ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-cfg-2 + local pod=some-name-cfg-2 + set +o xtrace waiting for pod/some-name-cfg-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.rGz787Ym7C +++ mktemp ++ local LAST_ERR=/tmp/tmp.1IxL0l8SS5 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.rGz787Ym7C ++ cat /tmp/tmp.1IxL0l8SS5 ++ rm /tmp/tmp.rGz787Ym7C /tmp/tmp.1IxL0l8SS5 ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.0LHMG7KaoG +++ mktemp ++ local LAST_ERR=/tmp/tmp.ELPG36BasB ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.0LHMG7KaoG ++ cat /tmp/tmp.ELPG36BasB ++ rm /tmp/tmp.0LHMG7KaoG /tmp/tmp.ELPG36BasB ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ false == \t\r\u\e ]] + wait_for_running some-name-mongos 3 + local name=some-name-mongos + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=mongos + local cluster_name=some-name ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod some-name-mongos-0 + local pod=some-name-mongos-0 + set +o xtrace waiting for pod/some-name-mongos-0 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod some-name-mongos-1 + local pod=some-name-mongos-1 + set +o xtrace waiting for pod/some-name-mongos-1 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Edt1nvh07C +++ mktemp ++ local LAST_ERR=/tmp/tmp.mnfzHfT1Dd ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Edt1nvh07C ++ cat /tmp/tmp.mnfzHfT1Dd ++ rm /tmp/tmp.Edt1nvh07C /tmp/tmp.mnfzHfT1Dd ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-mongos-2 + local pod=some-name-mongos-2 + set +o xtrace waiting for pod/some-name-mongos-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.1IFen7943I +++ mktemp ++ local LAST_ERR=/tmp/tmp.RwIcl2sr4X ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.1IFen7943I ++ cat /tmp/tmp.RwIcl2sr4X ++ rm /tmp/tmp.1IFen7943I /tmp/tmp.RwIcl2sr4X ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb some-name -o 
'jsonpath={.spec.replsets[?(@.name=="mongos")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ijK93TiT1a +++ mktemp ++ local LAST_ERR=/tmp/tmp.VeD45ziJay ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.ijK93TiT1a ++ cat /tmp/tmp.VeD45ziJay ++ rm /tmp/tmp.ijK93TiT1a /tmp/tmp.VeD45ziJay ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness + wait_cluster_consistency some-name + local cluster_name=some-name + local wait_time=32 + retry=0 + sleep 7 + echo -n 'waiting for cluster readyness' waiting for cluster readyness++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.zShWiDAIn2 +++ mktemp ++ local LAST_ERR=/tmp/tmp.SDiJ7Bu3Gh ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.zShWiDAIn2 ++ cat /tmp/tmp.SDiJ7Bu3Gh ++ rm /tmp/tmp.zShWiDAIn2 /tmp/tmp.SDiJ7Bu3Gh ++ return 0 + [[ ready == \r\e\a\d\y ]] + echo .OK .OK + run_mongos 'use myApp\n db.test.insert({ x: 100505 })' myApp:myPass@some-name-mongos.expose-sharded-21694 + local 'command=use myApp\n db.test.insert({ x: 100505 })' + local uri=myApp:myPass@some-name-mongos.expose-sharded-21694 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local port=27017 + local mongo_bin=mongo ++ echo .svc.cluster.local ++ awk -F: '{print $2}' + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.0aselxYjj9 +++ mktemp ++ local LAST_ERR=/tmp/tmp.tTAcOKlZRM ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.0aselxYjj9 ++ cat /tmp/tmp.tTAcOKlZRM ++ rm /tmp/tmp.0aselxYjj9 /tmp/tmp.tTAcOKlZRM ++ return 0 + local client_container=psmdb-client-66f577db5f-6lfsh + kubectl_bin exec psmdb-client-66f577db5f-6lfsh -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100505 })\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.expose-sharded-21694.svc.cluster.local:27017/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.d1jrTHLlbt ++ mktemp + local LAST_ERR=/tmp/tmp.QesWUdFmEH + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-66f577db5f-6lfsh -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100505 })\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.expose-sharded-21694.svc.cluster.local:27017/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.d1jrTHLlbt Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-mongos.expose-sharded-21694.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb Implicit session: session { "id" : UUID("3b6d93c6-299b-4d40-9d03-12333671dcdd") } Percona Server for MongoDB server version: v7.0.24-13 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.QesWUdFmEH + 
rm /tmp/tmp.d1jrTHLlbt /tmp/tmp.QesWUdFmEH + return 0 + compare_mongos_cmd find myApp:myPass@some-name-mongos.expose-sharded-21694 -6nd + local command=find + local uri=myApp:myPass@some-name-mongos.expose-sharded-21694 + local postfix=-6nd + local suffix= + local database=myApp + local collection=test + local port=27017 + local tls=false + [[ false == \t\r\u\e ]] + mongos_command=run_mongos + run_mongos 'use myApp\n db.test.find()' myApp:myPass@some-name-mongos.expose-sharded-21694 mongodb '' '' 27017 + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-mongos.expose-sharded-21694 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local port=27017 + egrep -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' + local mongo_bin=mongo + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ echo .svc.cluster.local ++ awk -F: '{print $2}' egrep: warning: egrep is obsolescent; using grep -E + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.aibr5AwSmH +++ mktemp ++ local LAST_ERR=/tmp/tmp.3TfAQyc9K5 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.aibr5AwSmH ++ cat /tmp/tmp.3TfAQyc9K5 ++ rm /tmp/tmp.aibr5AwSmH /tmp/tmp.3TfAQyc9K5 ++ return 0 + local client_container=psmdb-client-66f577db5f-6lfsh + kubectl_bin exec psmdb-client-66f577db5f-6lfsh -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.expose-sharded-21694.svc.cluster.local:27017/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.25fB4uTm3e ++ mktemp + local LAST_ERR=/tmp/tmp.SW4aIj0X4S + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-66f577db5f-6lfsh -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.expose-sharded-21694.svc.cluster.local:27017/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.25fB4uTm3e + cat /tmp/tmp.SW4aIj0X4S + rm /tmp/tmp.25fB4uTm3e /tmp/tmp.SW4aIj0X4S + return 0 + [[ 0 -eq 0 ]] + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1993/e2e-tests/expose-sharded/compare/find-6nd.json /tmp/tmp.wBdHIypxeu/find-6nd + compare_mongo_cmd find myApp:myPass@some-name-rs0-0.some-name-rs0.expose-sharded-21694 -6nd + local command=find + local uri=myApp:myPass@some-name-rs0-0.some-name-rs0.expose-sharded-21694 + local postfix=-6nd + local suffix= + local database=myApp + local collection=test + local sort= + local tls=false + local 'full_command=db.test.find()' + [[ -n '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-10-06T21:19:13+0000] running db.test.find() in myApp + [[ false == \t\r\u\e ]] + mongo_command=run_mongo + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-0.some-name-rs0.expose-sharded-21694 mongodb '' + local 'command=use myApp\n db.test.find()' + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for 
set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' + local uri=myApp:myPass@some-name-rs0-0.some-name-rs0.expose-sharded-21694 + local driver=mongodb + local suffix=.svc.cluster.local + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp egrep: warning: egrep is obsolescent; using grep -E ++ local LAST_OUT=/tmp/tmp.2ZQQaEBmr9 +++ mktemp ++ local LAST_ERR=/tmp/tmp.WODOaeEYzu ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.2ZQQaEBmr9 ++ cat /tmp/tmp.WODOaeEYzu ++ rm /tmp/tmp.2ZQQaEBmr9 /tmp/tmp.WODOaeEYzu ++ return 0 + local client_container=psmdb-client-66f577db5f-6lfsh + local mongo_flag= + [[ myApp:myPass@some-name-rs0-0.some-name-rs0.expose-sharded-21694 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-66f577db5f-6lfsh -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-0.some-name-rs0.expose-sharded-21694.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.6KoLts02Iw ++ mktemp + local LAST_ERR=/tmp/tmp.Dvwm9eUbAl + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-66f577db5f-6lfsh -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-0.some-name-rs0.expose-sharded-21694.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.6KoLts02Iw + cat /tmp/tmp.Dvwm9eUbAl + rm /tmp/tmp.6KoLts02Iw /tmp/tmp.Dvwm9eUbAl + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1993/e2e-tests/expose-sharded/compare/find-6nd.json /tmp/tmp.wBdHIypxeu/find-6nd + compare_mongo_cmd find myApp:myPass@some-name-rs0-1.some-name-rs0.expose-sharded-21694 -6nd + local command=find + local uri=myApp:myPass@some-name-rs0-1.some-name-rs0.expose-sharded-21694 + local postfix=-6nd + local suffix= + local database=myApp + local collection=test + local sort= + local tls=false + local 'full_command=db.test.find()' + [[ -n '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-10-06T21:19:17+0000] running db.test.find() in myApp + [[ false == \t\r\u\e ]] + mongo_command=run_mongo + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-1.some-name-rs0.expose-sharded-21694 mongodb '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-1.some-name-rs0.expose-sharded-21694 + local driver=mongodb + local suffix=.svc.cluster.local + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp egrep: warning: egrep is obsolescent; using grep -E ++ local LAST_OUT=/tmp/tmp.S5dznyFDBX +++ mktemp ++ local LAST_ERR=/tmp/tmp.hIPhnrrImO ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get 
pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.S5dznyFDBX ++ cat /tmp/tmp.hIPhnrrImO ++ rm /tmp/tmp.S5dznyFDBX /tmp/tmp.hIPhnrrImO ++ return 0 + local client_container=psmdb-client-66f577db5f-6lfsh + local mongo_flag= + [[ myApp:myPass@some-name-rs0-1.some-name-rs0.expose-sharded-21694 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-66f577db5f-6lfsh -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-1.some-name-rs0.expose-sharded-21694.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.v4RZ9ueebw ++ mktemp + local LAST_ERR=/tmp/tmp.LdIogL90YN + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-66f577db5f-6lfsh -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-1.some-name-rs0.expose-sharded-21694.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.v4RZ9ueebw + cat /tmp/tmp.LdIogL90YN + rm /tmp/tmp.v4RZ9ueebw /tmp/tmp.LdIogL90YN + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1993/e2e-tests/expose-sharded/compare/find-6nd.json /tmp/tmp.wBdHIypxeu/find-6nd + compare_mongo_cmd find myApp:myPass@some-name-rs0-2.some-name-rs0.expose-sharded-21694 -6nd + local command=find + local uri=myApp:myPass@some-name-rs0-2.some-name-rs0.expose-sharded-21694 + local postfix=-6nd + local suffix= + local database=myApp + local collection=test + local sort= + local tls=false + local 'full_command=db.test.find()' + [[ -n '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-10-06T21:19:19+0000] running db.test.find() in myApp + [[ false == \t\r\u\e ]] + mongo_command=run_mongo + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-2.some-name-rs0.expose-sharded-21694 mongodb '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-2.some-name-rs0.expose-sharded-21694 + local driver=mongodb + local suffix=.svc.cluster.local + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp egrep: warning: egrep is obsolescent; using grep -E ++ local LAST_OUT=/tmp/tmp.l19qy4mUik +++ mktemp ++ local LAST_ERR=/tmp/tmp.5iCAzKZaq9 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.l19qy4mUik ++ cat /tmp/tmp.5iCAzKZaq9 ++ rm /tmp/tmp.l19qy4mUik /tmp/tmp.5iCAzKZaq9 ++ return 0 + local client_container=psmdb-client-66f577db5f-6lfsh + local mongo_flag= + [[ myApp:myPass@some-name-rs0-2.some-name-rs0.expose-sharded-21694 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-66f577db5f-6lfsh -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-2.some-name-rs0.expose-sharded-21694.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ 
mktemp + local LAST_OUT=/tmp/tmp.3WDD4XlHTf ++ mktemp + local LAST_ERR=/tmp/tmp.yRmmpbbdl0 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-66f577db5f-6lfsh -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-2.some-name-rs0.expose-sharded-21694.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.3WDD4XlHTf + cat /tmp/tmp.yRmmpbbdl0 + rm /tmp/tmp.3WDD4XlHTf /tmp/tmp.yRmmpbbdl0 + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1993/e2e-tests/expose-sharded/compare/find-6nd.json /tmp/tmp.wBdHIypxeu/find-6nd + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1993/e2e-tests/conf/container-rc.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.Qrcnr9pyNa ++ mktemp + local LAST_ERR=/tmp/tmp.bbydeUVC2s + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1993/e2e-tests/conf/container-rc.yaml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Qrcnr9pyNa runtimeclass.node.k8s.io "container-rc" deleted + cat /tmp/tmp.bbydeUVC2s + rm /tmp/tmp.Qrcnr9pyNa /tmp/tmp.bbydeUVC2s + return 0 + destroy expose-sharded-21694 + local namespace=expose-sharded-21694 + local ignore_logs=true + [[ 0 == 1 ]] + desc 'destroy cluster/operator and all other resources' + set +o xtrace ----------------------------------------------------------------------------------- destroy cluster/operator and all other resources ----------------------------------------------------------------------------------- + '[' true == false ']' + delete_backups + desc 'Delete psmdb-backup' + set +o xtrace ----------------------------------------------------------------------------------- Delete psmdb-backup ----------------------------------------------------------------------------------- ++ kubectl_bin get psmdb-backup --no-headers ++ wc -l +++ mktemp ++ local LAST_OUT=/tmp/tmp.SAQBGMF1IE +++ mktemp ++ local LAST_ERR=/tmp/tmp.CAgJeleizF ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb-backup --no-headers ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.SAQBGMF1IE ++ cat /tmp/tmp.CAgJeleizF No resources found in expose-sharded-21694 namespace. 
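# The find/compare steps traced above all follow one pattern: pipe a small mongo
# shell script into the psmdb-client pod with `kubectl exec`, strip noisy driver
# and session lines, normalize ObjectIds and pod suffixes with sed, then diff the
# result against a stored fixture (e.g. compare/find-6nd.json). A minimal sketch
# of that pattern; the function name and argument layout here are illustrative,
# not the harness's exact helpers:
run_find_and_compare() {
    local client_pod="$1" uri="$2" expected="$3" out
    out=$(kubectl exec "$client_pod" -- bash -c \
        "printf 'use myApp\n db.test.find()\n' | mongo \"mongodb://${uri}/admin?ssl=false&replicaSet=rs0\"" \
        | grep -Ev 'I NETWORK|W NETWORK|F NETWORK|Implicit session:|Percona Server for MongoDB|connecting to:|Error saving history file' \
        | sed -E 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/')
    diff -u "$expected" <(printf '%s\n' "$out")
}
# Hypothetical invocation matching the values seen in this run:
# run_find_and_compare psmdb-client-66f577db5f-6lfsh \
#     myApp:myPass@some-name-rs0-0.some-name-rs0.expose-sharded-21694.svc.cluster.local \
#     e2e-tests/expose-sharded/compare/find-6nd.json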
++ rm /tmp/tmp.SAQBGMF1IE /tmp/tmp.CAgJeleizF ++ return 0 + '[' 0 '!=' 0 ']' + delete_crd + desc 'get and delete old CRDs and RBAC' + set +o xtrace ----------------------------------------------------------------------------------- get and delete old CRDs and RBAC ----------------------------------------------------------------------------------- + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1993/deploy/crd.yaml --ignore-not-found --wait=false ++ mktemp + local LAST_OUT=/tmp/tmp.uQWslyCiIe ++ mktemp + local LAST_ERR=/tmp/tmp.EmR2pfVs9Y + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1993/deploy/crd.yaml --ignore-not-found --wait=false + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.uQWslyCiIe customresourcedefinition.apiextensions.k8s.io "perconaservermongodbbackups.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbrestores.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbs.psmdb.percona.com" deleted + cat /tmp/tmp.EmR2pfVs9Y + rm /tmp/tmp.uQWslyCiIe /tmp/tmp.EmR2pfVs9Y + return 0 ++ yq eval .metadata.name /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1993/deploy/crd.yaml ++ grep -v '\-\-\-' grep: warning: stray \ before - grep: warning: stray \ before - + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + kubectl get perconaservermongodbbackups.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbbackups.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbbackups" + kubectl patch perconaservermongodbbackups.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbbackups" + : + kubectl_bin wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.xjYGd9TvdW ++ mktemp + local LAST_ERR=/tmp/tmp.4gX4buJ9aV + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.xjYGd9TvdW + cat /tmp/tmp.4gX4buJ9aV + rm /tmp/tmp.xjYGd9TvdW /tmp/tmp.4gX4buJ9aV + return 0 + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + kubectl get perconaservermongodbrestores.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbrestores.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbrestores" + kubectl patch perconaservermongodbrestores.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbrestores" + : + kubectl_bin wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.sZpBZeCPLy ++ mktemp + local LAST_ERR=/tmp/tmp.C1KlSpO1CQ + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + 
break + cat /tmp/tmp.sZpBZeCPLy + cat /tmp/tmp.C1KlSpO1CQ + rm /tmp/tmp.sZpBZeCPLy /tmp/tmp.C1KlSpO1CQ + return 0 + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + kubectl get perconaservermongodbs.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbs.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbs" + kubectl patch perconaservermongodbs.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbs" + : + kubectl_bin wait --for=delete crd perconaservermongodbs.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.DsWcuIzupD ++ mktemp + local LAST_ERR=/tmp/tmp.j9YMplKdOZ + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbs.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.DsWcuIzupD + cat /tmp/tmp.j9YMplKdOZ + rm /tmp/tmp.DsWcuIzupD /tmp/tmp.j9YMplKdOZ + return 0 + local rbac_yaml=rbac.yaml + '[' -n psmdb-operator ']' + rbac_yaml=cw-rbac.yaml + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1993/deploy/cw-rbac.yaml --ignore-not-found ++ mktemp + local LAST_OUT=/tmp/tmp.eKoBUvfUe0 ++ mktemp + local LAST_ERR=/tmp/tmp.APVnzpD1GW + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1993/deploy/cw-rbac.yaml --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.eKoBUvfUe0 clusterrole.rbac.authorization.k8s.io "percona-server-mongodb-operator" deleted clusterrolebinding.rbac.authorization.k8s.io "service-account-percona-server-mongodb-operator" deleted + cat /tmp/tmp.APVnzpD1GW + rm /tmp/tmp.eKoBUvfUe0 /tmp/tmp.APVnzpD1GW + return 0 + destroy_cert_manager + kubectl_bin delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.9QydrQJY49 ++ mktemp + local LAST_ERR=/tmp/tmp.dzBg05JxlK + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.9QydrQJY49 + cat /tmp/tmp.dzBg05JxlK Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": namespaces "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificaterequests.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificates.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "challenges.acme.cert-manager.io" not found Error from server (NotFound): error when deleting 
"https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "clusterissuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "issuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "orders.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": serviceaccounts "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": serviceaccounts "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": serviceaccounts "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cluster-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-edit" not found Error from server (NotFound): error when deleting 
"https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting 
"https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": services "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": services "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": services "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": deployments.apps "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": deployments.apps "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": deployments.apps "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": mutatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": validatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found + sleep 0 + for i in $(seq 0 2) + set +e + kubectl delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.9QydrQJY49 + cat /tmp/tmp.dzBg05JxlK Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": namespaces "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificaterequests.cert-manager.io" not 
found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificates.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "challenges.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "clusterissuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "issuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "orders.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": serviceaccounts "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": serviceaccounts "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": serviceaccounts "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cluster-view" not found Error from server 
(NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-edit" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting 
"https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": services "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": services "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": services "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": deployments.apps "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": deployments.apps "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": deployments.apps "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": mutatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": validatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found + sleep 4 + for i in $(seq 0 2) + set +e + kubectl delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.9QydrQJY49 
+ cat /tmp/tmp.dzBg05JxlK Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": namespaces "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificaterequests.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificates.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "challenges.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "clusterissuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "issuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "orders.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": serviceaccounts "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": serviceaccounts "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": serviceaccounts "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when 
deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cluster-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-edit" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting 
"https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": services "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": services "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": services "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": deployments.apps "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": deployments.apps "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": deployments.apps "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": mutatingwebhookconfigurations.admissionregistration.k8s.io 
"cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": validatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found + sleep 8 + cat /tmp/tmp.9QydrQJY49 + cat /tmp/tmp.dzBg05JxlK Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": namespaces "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificaterequests.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificates.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "challenges.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "clusterissuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "issuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "orders.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": serviceaccounts "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": serviceaccounts "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": serviceaccounts "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io 
"cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cluster-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-edit" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting 
"https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": services "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": services "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": services "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": deployments.apps "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": deployments.apps "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": 
deployments.apps "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": mutatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": validatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found + rm /tmp/tmp.9QydrQJY49 /tmp/tmp.dzBg05JxlK + return 1 + true + '[' -n '' ']' + '[' -n psmdb-operator ']' + kubectl_bin delete --grace-period=0 --force=true namespace expose-sharded-21694 + rm -rf /tmp/tmp.wBdHIypxeu + kubectl_bin delete --grace-period=0 --force=true namespace psmdb-operator ++ mktemp ++ mktemp + local LAST_OUT=/tmp/tmp.5h5z0gyNPB + local LAST_OUT=/tmp/tmp.b665HOCu4s ++ mktemp ++ mktemp + local LAST_ERR=/tmp/tmp.Wp1OYqqYok + local exit_status=0 + local timeout=4 ++ seq 0 2 + local LAST_ERR=/tmp/tmp.tYUuYqqMR9 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete --grace-period=0 --force=true namespace psmdb-operator + for i in $(seq 0 2) + set +e + kubectl delete --grace-period=0 --force=true namespace expose-sharded-21694
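# The repeated cert-manager "NotFound" blocks above come from the harness's retry
# wrapper: each kubectl call is tried up to three times, stdout/stderr are captured
# in mktemp files and printed on every failure, and the delay between attempts grows
# (sleep 0, 4, 8). The wrapper is called kubectl_bin in this log; its internals below
# are a reconstructed sketch, not the harness's exact code:
kubectl_bin() {
    local out err rc timeout=4
    out=$(mktemp) err=$(mktemp)
    for i in 0 1 2; do
        kubectl "$@" >"$out" 2>"$err"
        rc=$?
        cat "$out" "$err"              # echo captured output on every attempt
        if [ "$rc" -eq 0 ]; then
            rm -f "$out" "$err"
            return 0
        fi
        sleep $((timeout * i))         # 0s, then 4s, then 8s between attempts
    done
    rm -f "$out" "$err"
    return 1                           # callers tolerate this (the trace shows `+ true`)
}
# In this run the cert-manager resources were simply not present, so every delete
# returned NotFound and all three attempts failed before the test moved on to the
# forced namespace cleanup. Passing --ignore-not-found, as the CRD/RBAC deletes
# earlier in the teardown do, would skip the retries, e.g.:
#   kubectl_bin delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml --ignore-not-found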