++ echo 'Log: /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1965/e2e-tests/logs/expose-sharded.log' Log: /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1965/e2e-tests/logs/expose-sharded.log ++ '[' -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1965/e2e-tests/conf/cloud-secret.yml ']' ++ SKIP_BACKUPS_TO_AWS_GCP_AZURE= ++ oc get projects ++ kubectl get nodes ++ grep '^minikube' +++ kubectl version -o json +++ jq -r .serverVersion.gitVersion +++ grep '\-eks\-' WARNING: version difference between client (1.33) and server (1.30) exceeds the supported minor version skew of +/-1 ++ '[' ']' ++ EKS=0 +++ kubectl version -o json +++ jq -r .serverVersion.gitVersion +++ grep gke WARNING: version difference between client (1.33) and server (1.30) exceeds the supported minor version skew of +/-1 ++ '[' v1.30.12-gke.1208000 ']' ++ GKE=1 +++ kubectl version -o json +++ /usr/bin/sed -r 's/[^0-9.]+//g' +++ jq -r '.serverVersion.major + "." + .serverVersion.minor' WARNING: version difference between client (1.33) and server (1.30) exceeds the supported minor version skew of +/-1 ++ KUBE_VERSION=1.30 + main + create_infra expose-sharded-3967 + local ns=expose-sharded-3967 + [[ 1 == 1 ]] + delete_crd + desc 'get and delete old CRDs and RBAC' + set +o xtrace ----------------------------------------------------------------------------------- get and delete old CRDs and RBAC ----------------------------------------------------------------------------------- + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1965/deploy/crd.yaml --ignore-not-found --wait=false ++ mktemp + local LAST_OUT=/tmp/tmp.IBZoIP0cM4 ++ mktemp + local LAST_ERR=/tmp/tmp.HFQx0TeTt8 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1965/deploy/crd.yaml --ignore-not-found --wait=false + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.IBZoIP0cM4 customresourcedefinition.apiextensions.k8s.io "perconaservermongodbbackups.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbrestores.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbs.psmdb.percona.com" deleted + cat /tmp/tmp.HFQx0TeTt8 + rm /tmp/tmp.IBZoIP0cM4 /tmp/tmp.HFQx0TeTt8 + return 0 ++ yq eval .metadata.name /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1965/deploy/crd.yaml ++ grep -v '\-\-\-' + for crd_name in '$(yq eval '\''.metadata.name'\'' "${src_dir}/deploy/crd.yaml" | grep -v '\''\-\-\-'\'')' + kubectl get perconaservermongodbbackups.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbbackups.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbbackups" + kubectl patch perconaservermongodbbackups.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbbackups" + : + kubectl_bin wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.3gHr2U8kGV ++ mktemp + local LAST_ERR=/tmp/tmp.mrN1x1i1fH + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.3gHr2U8kGV + cat /tmp/tmp.mrN1x1i1fH + rm 
/tmp/tmp.3gHr2U8kGV /tmp/tmp.mrN1x1i1fH + return 0 + for crd_name in '$(yq eval '\''.metadata.name'\'' "${src_dir}/deploy/crd.yaml" | grep -v '\''\-\-\-'\'')' + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbrestores.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' + kubectl get perconaservermongodbrestores.psmdb.percona.com --all-namespaces -o wide error: the server doesn't have a resource type "perconaservermongodbrestores" + kubectl patch perconaservermongodbrestores.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbrestores" + : + kubectl_bin wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.KV0nPMCr7X ++ mktemp + local LAST_ERR=/tmp/tmp.2bM0E1IHPW + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.KV0nPMCr7X + cat /tmp/tmp.2bM0E1IHPW + rm /tmp/tmp.KV0nPMCr7X /tmp/tmp.2bM0E1IHPW + return 0 + for crd_name in '$(yq eval '\''.metadata.name'\'' "${src_dir}/deploy/crd.yaml" | grep -v '\''\-\-\-'\'')' + grep -v NAMESPACE + kubectl get perconaservermongodbs.psmdb.percona.com --all-namespaces -o wide + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbs.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbs" + kubectl patch perconaservermongodbs.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbs" + : + kubectl_bin wait --for=delete crd perconaservermongodbs.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.hMz4d4svZ1 ++ mktemp + local LAST_ERR=/tmp/tmp.c6Fzo3ScHf + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=delete crd perconaservermongodbs.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.hMz4d4svZ1 + cat /tmp/tmp.c6Fzo3ScHf + rm /tmp/tmp.hMz4d4svZ1 /tmp/tmp.c6Fzo3ScHf + return 0 + local rbac_yaml=rbac.yaml + '[' -n psmdb-operator ']' + rbac_yaml=cw-rbac.yaml + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1965/deploy/cw-rbac.yaml --ignore-not-found ++ mktemp + local LAST_OUT=/tmp/tmp.OBbmgzIPv8 ++ mktemp + local LAST_ERR=/tmp/tmp.0gtfrN0zBY + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1965/deploy/cw-rbac.yaml --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.OBbmgzIPv8 clusterrole.rbac.authorization.k8s.io "percona-server-mongodb-operator" deleted clusterrolebinding.rbac.authorization.k8s.io "service-account-percona-server-mongodb-operator" deleted + cat /tmp/tmp.0gtfrN0zBY + rm /tmp/tmp.OBbmgzIPv8 /tmp/tmp.0gtfrN0zBY + return 0 + check_crd_for_deletion PR-1965-0d670696 + local git_tag=PR-1965-0d670696 ++ yq eval .metadata.name ++ /usr/bin/sed ':a;N;$!ba;s/\n/ /g' ++ /usr/bin/sed s/---//g ++ curl -s https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/PR-1965-0d670696/deploy/crd.yaml + for crd_name in '$(curl -s https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/${git_tag}/deploy/crd.yaml | yq eval 
'\''.metadata.name'\'' | $sed '\''s/---//g'\'' | $sed '\'':a;N;$!ba;s/\n/ /g'\'')' ++ kubectl_bin get crd/null -o 'jsonpath={.status.conditions[-1].type}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.aNZEGW7Ut1 +++ mktemp ++ local LAST_ERR=/tmp/tmp.aGXKjfAj7S ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}' ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.aNZEGW7Ut1 ++ cat /tmp/tmp.aGXKjfAj7S Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ sleep 0 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}' ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.aNZEGW7Ut1 ++ cat /tmp/tmp.aGXKjfAj7S Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ sleep 4 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}' ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.aNZEGW7Ut1 ++ cat /tmp/tmp.aGXKjfAj7S Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ sleep 8 ++ cat /tmp/tmp.aNZEGW7Ut1 ++ cat /tmp/tmp.aGXKjfAj7S Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ rm /tmp/tmp.aNZEGW7Ut1 /tmp/tmp.aGXKjfAj7S ++ return 1 + [[ '' == \T\e\r\m\i\n\a\t\i\n\g ]] + '[' -n psmdb-operator ']' + create_namespace psmdb-operator + local namespace=psmdb-operator + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + destroy_chaos_mesh ++ awk '-F ' '{print $2}' ++ helm list --all-namespaces --filter chaos-mesh ++ sed s/NAMESPACE// ++ tail -n1 + local chaos_mesh_ns= + desc 'destroy chaos-mesh' + set +o xtrace ----------------------------------------------------------------------------------- destroy chaos-mesh ----------------------------------------------------------------------------------- + '[' -n '' ']' ++ kubectl get MutatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete MutatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ awk '{print $1}' ++ grep chaos-mesh ++ kubectl get ValidatingWebhookConfiguration + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ awk '{print $1}' ++ grep validate-auth + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ awk '{print $1}' ++ kubectl api-resources ++ grep chaos-mesh ++ kubectl get crd ++ awk '{print $1}' ++ grep chaos-mesh.org + timeout 30 kubectl delete crd error: resource(s) were provided, but no name was specified + : ++ awk '{print $1}' ++ grep chaos-mesh ++ kubectl get clusterrolebinding + timeout 30 kubectl delete clusterrolebinding error: resource(s) were provided, but no name was specified + : ++ awk '{print $1}' ++ kubectl get clusterrole ++ grep chaos-mesh + timeout 30 kubectl delete clusterrole error: resource(s) were provided, but no name was specified + : + desc 'cleaned up all old namespaces' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up all old namespaces ----------------------------------------------------------------------------------- + '[' -n '' ']' + desc 'cleaned up old 
namespaces psmdb-operator' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up old namespaces psmdb-operator ----------------------------------------------------------------------------------- + kubectl_bin delete namespace psmdb-operator --ignore-not-found ++ mktemp + xargs kubectl delete ns + kubectl_bin get ns ++ mktemp + awk '{print$1}' + egrep -v '^kube-|^default|Terminating|psmdb-operator|openshift|gke-mcs|^NAME' + local LAST_OUT=/tmp/tmp.HiTDXCO6d9 ++ mktemp + local LAST_ERR=/tmp/tmp.w8Eh5B5QXW + local exit_status=0 + local LAST_OUT=/tmp/tmp.hvUDX68x4D + local timeout=4 ++ mktemp + local LAST_ERR=/tmp/tmp.wl2zmomv1x + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get ns ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete namespace psmdb-operator --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.HiTDXCO6d9 + cat /tmp/tmp.w8Eh5B5QXW + rm /tmp/tmp.HiTDXCO6d9 /tmp/tmp.w8Eh5B5QXW + return 0 namespace "expose-sharded-18119" deleted namespace "gke-managed-cim" deleted namespace "gke-managed-system" deleted namespace "gmp-public" deleted namespace "gmp-system" deleted + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.hvUDX68x4D namespace "psmdb-operator" deleted + cat /tmp/tmp.wl2zmomv1x + rm /tmp/tmp.hvUDX68x4D /tmp/tmp.wl2zmomv1x + return 0 + kubectl_bin wait --for=delete namespace psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.AEIRSmiiJI ++ mktemp + local LAST_ERR=/tmp/tmp.SzgfNEkLp8 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=delete namespace psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.AEIRSmiiJI + cat /tmp/tmp.SzgfNEkLp8 + rm /tmp/tmp.AEIRSmiiJI /tmp/tmp.SzgfNEkLp8 + return 0 + desc 'create namespace psmdb-operator' + set +o xtrace ----------------------------------------------------------------------------------- create namespace psmdb-operator ----------------------------------------------------------------------------------- + kubectl_bin create namespace psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.1WGDy0XVId ++ mktemp + local LAST_ERR=/tmp/tmp.fO4pontycH + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl create namespace psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.1WGDy0XVId namespace/psmdb-operator created + cat /tmp/tmp.fO4pontycH + rm /tmp/tmp.1WGDy0XVId /tmp/tmp.fO4pontycH + return 0 + set_kube_ctx psmdb-operator + local namespace=psmdb-operator ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.VnVPLDQ4zW +++ mktemp ++ local LAST_ERR=/tmp/tmp.29OPx7ryCg ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.VnVPLDQ4zW ++ cat /tmp/tmp.29OPx7ryCg ++ rm /tmp/tmp.VnVPLDQ4zW /tmp/tmp.29OPx7ryCg ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-1965-0d670696-1-cluster1 --namespace=psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.t1Ac2DHOBm ++ mktemp + local LAST_ERR=/tmp/tmp.PQiSaeOZ0z + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-1965-0d670696-1-cluster1 
--namespace=psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.t1Ac2DHOBm Context "gke_cloud-dev-112233_us-central1-a_jen-psmdb-1965-0d670696-1-cluster1" modified. + cat /tmp/tmp.PQiSaeOZ0z + rm /tmp/tmp.t1Ac2DHOBm /tmp/tmp.PQiSaeOZ0z + return 0 + deploy_operator + desc 'start PSMDB operator' + set +o xtrace ----------------------------------------------------------------------------------- start PSMDB operator ----------------------------------------------------------------------------------- + local cr_file + '[' -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1965/e2e-tests/expose-sharded/conf/crd.yaml ']' + cr_file=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1965/deploy/crd.yaml + kubectl_bin apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1965/deploy/crd.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.waAYohGD7f ++ mktemp + local LAST_ERR=/tmp/tmp.Im9jjkOfgK + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1965/deploy/crd.yaml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.waAYohGD7f customresourcedefinition.apiextensions.k8s.io/perconaservermongodbbackups.psmdb.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaservermongodbrestores.psmdb.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaservermongodbs.psmdb.percona.com serverside-applied + cat /tmp/tmp.Im9jjkOfgK + rm /tmp/tmp.waAYohGD7f /tmp/tmp.Im9jjkOfgK + return 0 + '[' -n psmdb-operator ']' + apply_rbac cw-rbac + local operator_namespace=psmdb-operator + local rbac=cw-rbac + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1965/deploy/cw-rbac.yaml + sed -e 's^namespace: .*^namespace: psmdb-operator^' + kubectl_bin apply -n psmdb-operator -f - ++ mktemp + local LAST_OUT=/tmp/tmp.ss37XpUt8C ++ mktemp + local LAST_ERR=/tmp/tmp.KudL32N1iW + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -n psmdb-operator -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.ss37XpUt8C clusterrole.rbac.authorization.k8s.io/percona-server-mongodb-operator created serviceaccount/percona-server-mongodb-operator created clusterrolebinding.rbac.authorization.k8s.io/service-account-percona-server-mongodb-operator created + cat /tmp/tmp.KudL32N1iW + rm /tmp/tmp.ss37XpUt8C /tmp/tmp.KudL32N1iW + return 0 + yq eval ' (.spec.template.spec.containers[].image = "perconalab/percona-server-mongodb-operator:PR-1965-0d670696") | ((.. | select(.[] == "DISABLE_TELEMETRY")) |= .value="true") | ((.. 
| select(.[] == "LOG_LEVEL")) |= .value="DEBUG")' /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1965/deploy/cw-operator.yaml + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.TGPOB27kGn ++ mktemp + local LAST_ERR=/tmp/tmp.g7BQ818dPN + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.TGPOB27kGn deployment.apps/percona-server-mongodb-operator created + cat /tmp/tmp.g7BQ818dPN + rm /tmp/tmp.TGPOB27kGn /tmp/tmp.g7BQ818dPN + return 0 + sleep 2 ++ get_operator_pod ++ kubectl_bin get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.OPIEEdSZPV +++ mktemp ++ local LAST_ERR=/tmp/tmp.mmV7Yx6DwI ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.OPIEEdSZPV ++ cat /tmp/tmp.mmV7Yx6DwI ++ rm /tmp/tmp.OPIEEdSZPV /tmp/tmp.mmV7Yx6DwI ++ return 0 + wait_pod percona-server-mongodb-operator-7679559fb5-5n6q6 + local pod=percona-server-mongodb-operator-7679559fb5-5n6q6 + set +o xtrace waiting for pod/percona-server-mongodb-operator-7679559fb5-5n6q6 to be ready.OK + echo 'Print operator info from log' Print operator info from log + grep 'Manager starting up' ++ get_operator_pod ++ kubectl_bin get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.inVptF57Tx +++ mktemp ++ local LAST_ERR=/tmp/tmp.VsxZig6jZJ ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.inVptF57Tx ++ cat /tmp/tmp.VsxZig6jZJ ++ rm /tmp/tmp.inVptF57Tx /tmp/tmp.VsxZig6jZJ ++ return 0 + kubectl_bin logs percona-server-mongodb-operator-7679559fb5-5n6q6 ++ mktemp + local LAST_OUT=/tmp/tmp.5mpOGOka6l ++ mktemp + local LAST_ERR=/tmp/tmp.4lBM5LxVAp + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl logs percona-server-mongodb-operator-7679559fb5-5n6q6 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.5mpOGOka6l + cat /tmp/tmp.4lBM5LxVAp + rm /tmp/tmp.5mpOGOka6l /tmp/tmp.4lBM5LxVAp + return 0 2025-06-09T02:44:59.369Z INFO setup Manager starting up {"gitCommit": "0d6706965b3ee5c57aa53951860505b0d497f5b1", "gitBranch": "PR-1965-0d670696", "buildTime": "", "goVersion": "go1.24.4", "os": "linux", "arch": "amd64"} + create_namespace expose-sharded-3967 + local namespace=expose-sharded-3967 + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + destroy_chaos_mesh ++ tail -n1 ++ awk '-F ' '{print $2}' ++ helm list --all-namespaces --filter chaos-mesh ++ sed s/NAMESPACE// + local chaos_mesh_ns= + desc 'destroy chaos-mesh' + set +o xtrace ----------------------------------------------------------------------------------- destroy chaos-mesh ----------------------------------------------------------------------------------- + '[' -n '' ']' ++ kubectl get MutatingWebhookConfiguration ++ awk '{print $1}' ++ grep chaos-mesh + timeout 30 kubectl delete MutatingWebhookConfiguration error: 
resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep validate-auth ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ awk '{print $1}' ++ kubectl api-resources ++ grep chaos-mesh ++ kubectl get crd ++ grep chaos-mesh.org ++ awk '{print $1}' + timeout 30 kubectl delete crd error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrolebinding ++ awk '{print $1}' ++ grep chaos-mesh + timeout 30 kubectl delete clusterrolebinding error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrole ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrole error: resource(s) were provided, but no name was specified + : + desc 'cleaned up all old namespaces' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up all old namespaces ----------------------------------------------------------------------------------- + '[' -n '' ']' + desc 'cleaned up old namespaces expose-sharded-3967' + set +o xtrace + egrep -v '^kube-|^default|Terminating|psmdb-operator|openshift|gke-mcs|^NAME' ----------------------------------------------------------------------------------- cleaned up old namespaces expose-sharded-3967 ----------------------------------------------------------------------------------- + kubectl_bin delete namespace expose-sharded-3967 --ignore-not-found ++ mktemp + local LAST_OUT=/tmp/tmp.6W8d1HWsBF + kubectl_bin get ns ++ mktemp ++ mktemp + local LAST_OUT=/tmp/tmp.qRhZaM8Cf7 ++ mktemp + local LAST_ERR=/tmp/tmp.9JvgQllcDk + local exit_status=0 + local timeout=4 + local LAST_ERR=/tmp/tmp.AeiN6ojXSh + local exit_status=0 + local timeout=4 ++ seq 0 2 ++ seq 0 2 + awk '{print$1}' + xargs kubectl delete ns + for i in '$(seq 0 2)' + set +e + kubectl get ns + for i in '$(seq 0 2)' + set +e + kubectl delete namespace expose-sharded-3967 --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.qRhZaM8Cf7 + cat /tmp/tmp.AeiN6ojXSh + rm /tmp/tmp.qRhZaM8Cf7 /tmp/tmp.AeiN6ojXSh + return 0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.6W8d1HWsBF + cat /tmp/tmp.9JvgQllcDk + rm /tmp/tmp.6W8d1HWsBF /tmp/tmp.9JvgQllcDk + return 0 + kubectl_bin wait --for=delete namespace expose-sharded-3967 ++ mktemp + local LAST_OUT=/tmp/tmp.q58g6v8UDV ++ mktemp + local LAST_ERR=/tmp/tmp.QBcJuUIdg6 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=delete namespace expose-sharded-3967 namespace "gke-managed-cim" deleted namespace "gke-managed-system" deleted namespace "gmp-public" deleted namespace "gmp-system" deleted + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.q58g6v8UDV + cat /tmp/tmp.QBcJuUIdg6 + rm /tmp/tmp.q58g6v8UDV /tmp/tmp.QBcJuUIdg6 + return 0 + desc 'create namespace expose-sharded-3967' + set +o xtrace ----------------------------------------------------------------------------------- create namespace expose-sharded-3967 ----------------------------------------------------------------------------------- + kubectl_bin create namespace expose-sharded-3967 ++ mktemp + local 
LAST_OUT=/tmp/tmp.GLTayFtkdJ ++ mktemp + local LAST_ERR=/tmp/tmp.4LtWenkdi1 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl create namespace expose-sharded-3967 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.GLTayFtkdJ namespace/expose-sharded-3967 created + cat /tmp/tmp.4LtWenkdi1 + rm /tmp/tmp.GLTayFtkdJ /tmp/tmp.4LtWenkdi1 + return 0 + set_kube_ctx expose-sharded-3967 + local namespace=expose-sharded-3967 ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.KFw1ETh72r +++ mktemp ++ local LAST_ERR=/tmp/tmp.rIlonCocdy ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.KFw1ETh72r ++ cat /tmp/tmp.rIlonCocdy ++ rm /tmp/tmp.KFw1ETh72r /tmp/tmp.rIlonCocdy ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-1965-0d670696-1-cluster1 --namespace=expose-sharded-3967 ++ mktemp + local LAST_OUT=/tmp/tmp.oxV9SYd7UQ ++ mktemp + local LAST_ERR=/tmp/tmp.nWM2K8B1qw + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-1965-0d670696-1-cluster1 --namespace=expose-sharded-3967 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.oxV9SYd7UQ Context "gke_cloud-dev-112233_us-central1-a_jen-psmdb-1965-0d670696-1-cluster1" modified. + cat /tmp/tmp.nWM2K8B1qw + rm /tmp/tmp.oxV9SYd7UQ /tmp/tmp.nWM2K8B1qw + return 0 + desc 'create first PSMDB cluster' + set +o xtrace ----------------------------------------------------------------------------------- create first PSMDB cluster ----------------------------------------------------------------------------------- + cluster=some-name + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1965/e2e-tests/conf/secrets.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1965/e2e-tests/conf/client.yml ++ mktemp + local LAST_OUT=/tmp/tmp.DIWEneFCKS ++ mktemp + local LAST_ERR=/tmp/tmp.VDOkR4xYrK + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1965/e2e-tests/conf/secrets.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1965/e2e-tests/conf/client.yml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.DIWEneFCKS secret/some-users created deployment.apps/psmdb-client created + cat /tmp/tmp.VDOkR4xYrK + rm /tmp/tmp.DIWEneFCKS /tmp/tmp.VDOkR4xYrK + return 0 + apply_s3_storage_secrets + desc 'create secrets for cloud storages' + set +o xtrace ----------------------------------------------------------------------------------- create secrets for cloud storages ----------------------------------------------------------------------------------- + '[' -z '' ']' + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1965/e2e-tests/conf/minio-secret.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1965/e2e-tests/conf/cloud-secret.yml ++ mktemp + local LAST_OUT=/tmp/tmp.MMGcMszRef ++ mktemp + local LAST_ERR=/tmp/tmp.clq3eMOHEe + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1965/e2e-tests/conf/minio-secret.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1965/e2e-tests/conf/cloud-secret.yml + exit_status=0 + set -e + 
'[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.MMGcMszRef secret/minio-secret created secret/aws-s3-secret created secret/gcp-cs-secret created secret/azure-secret created + cat /tmp/tmp.clq3eMOHEe + rm /tmp/tmp.MMGcMszRef /tmp/tmp.clq3eMOHEe + return 0 + version_gt 1.19 ++ echo '1.30 >= 1.19' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + '[' 0 -ne 1 ']' + /usr/bin/sed s/docker/runc/g + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1965/e2e-tests/conf/container-rc.yaml + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.JnrRTCRJMT ++ mktemp + local LAST_ERR=/tmp/tmp.9mK6UK2QZC + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.JnrRTCRJMT runtimeclass.node.k8s.io/container-rc unchanged + cat /tmp/tmp.9mK6UK2QZC + rm /tmp/tmp.JnrRTCRJMT /tmp/tmp.9mK6UK2QZC + return 0 + apply_cluster /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1965/e2e-tests/expose-sharded/conf/some-name-rs0.yml + '[' -z '' ']' + kubectl_bin apply -f - + cat_config /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1965/e2e-tests/expose-sharded/conf/some-name-rs0.yml ++ mktemp + yq eval '(.spec | select(has("pmm"))).pmm.image = "perconalab/pmm-client:dev-latest"' + yq eval '(.spec | select(has("initImage"))).initImage = "perconalab/percona-server-mongodb-operator:PR-1965-0d670696"' + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1965/e2e-tests/expose-sharded/conf/some-name-rs0.yml + yq eval '(.spec | select(.image == null)).image = "perconalab/percona-server-mongodb-operator:main-mongod7.0"' + yq eval '(.spec | select(has("backup"))).backup.image = "perconalab/percona-server-mongodb-operator:main-backup"' + yq eval '.spec.upgradeOptions.apply="Never"' + local LAST_OUT=/tmp/tmp.AHu7FVQSiR ++ mktemp + local LAST_ERR=/tmp/tmp.Eos3xQ2UfQ + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.AHu7FVQSiR perconaservermongodb.psmdb.percona.com/some-name created + cat /tmp/tmp.Eos3xQ2UfQ + rm /tmp/tmp.AHu7FVQSiR /tmp/tmp.Eos3xQ2UfQ + return 0 + desc 'check if all 3 Pods started' + set +o xtrace ----------------------------------------------------------------------------------- check if all 3 Pods started ----------------------------------------------------------------------------------- + wait_for_running some-name-rs0 3 + local name=some-name-rs0 + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=some-name ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + [[ 0 -eq 2 ]] + wait_pod some-name-rs0-0 + local pod=some-name-rs0-0 + set +o xtrace waiting for pod/some-name-rs0-0 to be ready......OK + for i in '$(seq 0 $last_pod)' + [[ 1 -eq 2 ]] + wait_pod some-name-rs0-1 + local pod=some-name-rs0-1 + set +o xtrace waiting for pod/some-name-rs0-1 to be ready........OK + for i in '$(seq 0 $last_pod)' + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.jsIbkVE7si +++ mktemp ++ local LAST_ERR=/tmp/tmp.x0ZsYe7WCm ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.jsIbkVE7si ++ cat 
/tmp/tmp.x0ZsYe7WCm ++ rm /tmp/tmp.jsIbkVE7si /tmp/tmp.x0ZsYe7WCm ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-rs0-2 + local pod=some-name-rs0-2 + set +o xtrace waiting for pod/some-name-rs0-2 to be ready.....OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.oyRG4tTmYq +++ mktemp ++ local LAST_ERR=/tmp/tmp.72zCMAQPNn ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.oyRG4tTmYq ++ cat /tmp/tmp.72zCMAQPNn ++ rm /tmp/tmp.oyRG4tTmYq /tmp/tmp.72zCMAQPNn ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.0ypuy1ZBM9 +++ mktemp ++ local LAST_ERR=/tmp/tmp.OWiupVYyIP ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.0ypuy1ZBM9 ++ cat /tmp/tmp.OWiupVYyIP ++ rm /tmp/tmp.0ypuy1ZBM9 /tmp/tmp.OWiupVYyIP ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness......................... + wait_for_running some-name-cfg 3 false + local name=some-name-cfg + let last_pod=2 + local check_cluster_readyness=false + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=cfg + local cluster_name=some-name ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + [[ 0 -eq 2 ]] + wait_pod some-name-cfg-0 + local pod=some-name-cfg-0 + set +o xtrace waiting for pod/some-name-cfg-0 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 1 -eq 2 ]] + wait_pod some-name-cfg-1 + local pod=some-name-cfg-1 + set +o xtrace waiting for pod/some-name-cfg-1 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.I7yePjB2Aa +++ mktemp ++ local LAST_ERR=/tmp/tmp.cCc0GLiYz7 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.I7yePjB2Aa ++ cat /tmp/tmp.cCc0GLiYz7 ++ rm /tmp/tmp.I7yePjB2Aa /tmp/tmp.cCc0GLiYz7 ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-cfg-2 + local pod=some-name-cfg-2 + set +o xtrace waiting for pod/some-name-cfg-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].non_voting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.xFZUIUaGlu +++ mktemp ++ local LAST_ERR=/tmp/tmp.UN384ado2G ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].non_voting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.xFZUIUaGlu ++ cat /tmp/tmp.UN384ado2G ++ rm /tmp/tmp.xFZUIUaGlu /tmp/tmp.UN384ado2G ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.mcV5VZ5r3v +++ mktemp ++ local 
LAST_ERR=/tmp/tmp.hkS9mSWXkP ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.mcV5VZ5r3v ++ cat /tmp/tmp.hkS9mSWXkP ++ rm /tmp/tmp.mcV5VZ5r3v /tmp/tmp.hkS9mSWXkP ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ false == \t\r\u\e ]] + wait_for_running some-name-mongos 3 + local name=some-name-mongos + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=mongos + local cluster_name=some-name ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + [[ 0 -eq 2 ]] + wait_pod some-name-mongos-0 + local pod=some-name-mongos-0 + set +o xtrace waiting for pod/some-name-mongos-0 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 1 -eq 2 ]] + wait_pod some-name-mongos-1 + local pod=some-name-mongos-1 + set +o xtrace waiting for pod/some-name-mongos-1 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Ijw3McvB0v +++ mktemp ++ local LAST_ERR=/tmp/tmp.zxv9WAThX7 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Ijw3McvB0v ++ cat /tmp/tmp.zxv9WAThX7 ++ rm /tmp/tmp.Ijw3McvB0v /tmp/tmp.zxv9WAThX7 ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-mongos-2 + local pod=some-name-mongos-2 + set +o xtrace waiting for pod/some-name-mongos-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].non_voting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.4830ctulb6 +++ mktemp ++ local LAST_ERR=/tmp/tmp.U3ghTjjy2K ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].non_voting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.4830ctulb6 ++ cat /tmp/tmp.U3ghTjjy2K ++ rm /tmp/tmp.4830ctulb6 /tmp/tmp.U3ghTjjy2K ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.xkU5lHMscE +++ mktemp ++ local LAST_ERR=/tmp/tmp.zhlj23v1oa ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.xkU5lHMscE ++ cat /tmp/tmp.zhlj23v1oa ++ rm /tmp/tmp.xkU5lHMscE /tmp/tmp.zhlj23v1oa ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness + wait_cluster_consistency some-name + local cluster_name=some-name + local wait_time=32 + retry=0 + sleep 7 + echo -n 'waiting for cluster readyness' waiting for cluster readyness++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.3JPndjPGZ6 +++ mktemp ++ local LAST_ERR=/tmp/tmp.SrqTEtxFsB ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ 
exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.3JPndjPGZ6 ++ cat /tmp/tmp.SrqTEtxFsB ++ rm /tmp/tmp.3JPndjPGZ6 /tmp/tmp.SrqTEtxFsB ++ return 0 + [[ ready == \r\e\a\d\y ]] + echo + desc 'check if service and statefulset created with expected config' + set +o xtrace ----------------------------------------------------------------------------------- check if service and statefulset created with expected config ----------------------------------------------------------------------------------- + compare_kubectl statefulset/some-name-rs0 + local resource=statefulset/some-name-rs0 + local postfix= + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1965/e2e-tests/expose-sharded/compare/statefulset_some-name-rs0.yml + local new_result=/tmp/tmp.TQYCeunbst/statefulset_some-name-rs0.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1965/e2e-tests/expose-sharded/compare/statefulset_some-name-rs0-oc.yml ']' + kubectl_bin get -o yaml statefulset/some-name-rs0 + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("expose-sharded-3967", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. 
== "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.ntDZzpL1G6 ++ mktemp + local LAST_ERR=/tmp/tmp.vbsRgCyNYG + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get -o yaml statefulset/some-name-rs0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.ntDZzpL1G6 + cat /tmp/tmp.vbsRgCyNYG + rm /tmp/tmp.ntDZzpL1G6 /tmp/tmp.vbsRgCyNYG + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.TQYCeunbst/statefulset_some-name-rs0.yml + version_gt 1.22 ++ echo '1.30 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.TQYCeunbst/statefulset_some-name-rs0.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.TQYCeunbst/statefulset_some-name-rs0.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1965/e2e-tests/expose-sharded/compare/statefulset_some-name-rs0.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1965/e2e-tests/expose-sharded/compare/statefulset_some-name-rs0.yml /tmp/tmp.TQYCeunbst/statefulset_some-name-rs0.yml + compare_kubectl statefulset/some-name-cfg + local resource=statefulset/some-name-cfg + local postfix= + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1965/e2e-tests/expose-sharded/compare/statefulset_some-name-cfg.yml + local new_result=/tmp/tmp.TQYCeunbst/statefulset_some-name-cfg.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1965/e2e-tests/expose-sharded/compare/statefulset_some-name-cfg-oc.yml ']' + kubectl_bin get -o yaml statefulset/some-name-cfg + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. 
| select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("expose-sharded-3967", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. == "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.lacAbkqzjy ++ mktemp + local LAST_ERR=/tmp/tmp.8rK3JLGnAm + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get -o yaml statefulset/some-name-cfg + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.lacAbkqzjy + cat /tmp/tmp.8rK3JLGnAm + rm /tmp/tmp.lacAbkqzjy /tmp/tmp.8rK3JLGnAm + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.TQYCeunbst/statefulset_some-name-cfg.yml + version_gt 1.22 ++ echo '1.30 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.TQYCeunbst/statefulset_some-name-cfg.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.TQYCeunbst/statefulset_some-name-cfg.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1965/e2e-tests/expose-sharded/compare/statefulset_some-name-cfg.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1965/e2e-tests/expose-sharded/compare/statefulset_some-name-cfg.yml /tmp/tmp.TQYCeunbst/statefulset_some-name-cfg.yml + compare_kubectl statefulset/some-name-mongos '' + local resource=statefulset/some-name-mongos + local postfix= + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1965/e2e-tests/expose-sharded/compare/statefulset_some-name-mongos.yml + local new_result=/tmp/tmp.TQYCeunbst/statefulset_some-name-mongos.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1965/e2e-tests/expose-sharded/compare/statefulset_some-name-mongos-oc.yml ']' + kubectl_bin get -o yaml statefulset/some-name-mongos + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. 
| select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("expose-sharded-3967", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. == "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.WqkFObS6UM ++ mktemp + local LAST_ERR=/tmp/tmp.JXwJOemA1N + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get -o yaml statefulset/some-name-mongos + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.WqkFObS6UM + cat /tmp/tmp.JXwJOemA1N + rm /tmp/tmp.WqkFObS6UM /tmp/tmp.JXwJOemA1N + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.TQYCeunbst/statefulset_some-name-mongos.yml + version_gt 1.22 ++ echo '1.30 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.TQYCeunbst/statefulset_some-name-mongos.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.TQYCeunbst/statefulset_some-name-mongos.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1965/e2e-tests/expose-sharded/compare/statefulset_some-name-mongos.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1965/e2e-tests/expose-sharded/compare/statefulset_some-name-mongos.yml /tmp/tmp.TQYCeunbst/statefulset_some-name-mongos.yml + desc 'disabling sharding' + set +o xtrace ----------------------------------------------------------------------------------- disabling sharding ----------------------------------------------------------------------------------- + kubectl_bin patch psmdb some-name --type=json '-p=[{"op": "replace", "path": "/spec/sharding/enabled", "value": false}]' ++ mktemp + local LAST_OUT=/tmp/tmp.wKdEJxwrAD ++ mktemp + local LAST_ERR=/tmp/tmp.TGJbuWAzwR + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl patch psmdb some-name --type=json '-p=[{"op": "replace", "path": "/spec/sharding/enabled", "value": false}]' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.wKdEJxwrAD perconaservermongodb.psmdb.percona.com/some-name patched + cat /tmp/tmp.TGJbuWAzwR + rm /tmp/tmp.wKdEJxwrAD /tmp/tmp.TGJbuWAzwR + return 0 + sleep 10 + wait_cluster_consistency some-name 60 + local cluster_name=some-name + local wait_time=60 + retry=0 + sleep 7 + echo -n 'waiting for cluster readyness' waiting for cluster readyness++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.cyA5Q99hU4 +++ mktemp ++ local LAST_ERR=/tmp/tmp.mwEkxm0hMx ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.cyA5Q99hU4 ++ cat /tmp/tmp.mwEkxm0hMx ++ rm /tmp/tmp.cyA5Q99hU4 /tmp/tmp.mwEkxm0hMx ++ return 0 + [[ stopping == \r\e\a\d\y ]] + let retry+=1 + '[' 1 -ge 60 ']' 
+ echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.PheP6DyLzL +++ mktemp ++ local LAST_ERR=/tmp/tmp.a2qRjDVTKX ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.PheP6DyLzL ++ cat /tmp/tmp.a2qRjDVTKX ++ rm /tmp/tmp.PheP6DyLzL /tmp/tmp.a2qRjDVTKX ++ return 0 + [[ stopping == \r\e\a\d\y ]] + let retry+=1 + '[' 2 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.B6XOpHON3R +++ mktemp ++ local LAST_ERR=/tmp/tmp.uZRfAsrcSZ ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.B6XOpHON3R ++ cat /tmp/tmp.uZRfAsrcSZ ++ rm /tmp/tmp.B6XOpHON3R /tmp/tmp.uZRfAsrcSZ ++ return 0 + [[ stopping == \r\e\a\d\y ]] + let retry+=1 + '[' 3 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.dZu964OInm +++ mktemp ++ local LAST_ERR=/tmp/tmp.OXK28QiuHb ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.dZu964OInm ++ cat /tmp/tmp.OXK28QiuHb ++ rm /tmp/tmp.dZu964OInm /tmp/tmp.OXK28QiuHb ++ return 0 + [[ stopping == \r\e\a\d\y ]] + let retry+=1 + '[' 4 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.c5O1DuIZ6w +++ mktemp ++ local LAST_ERR=/tmp/tmp.815fDWpm2O ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.c5O1DuIZ6w ++ cat /tmp/tmp.815fDWpm2O ++ rm /tmp/tmp.c5O1DuIZ6w /tmp/tmp.815fDWpm2O ++ return 0 + [[ stopping == \r\e\a\d\y ]] + let retry+=1 + '[' 5 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.VOXNzLNI3Y +++ mktemp ++ local LAST_ERR=/tmp/tmp.PLvAGeymfF ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.VOXNzLNI3Y ++ cat /tmp/tmp.PLvAGeymfF ++ rm /tmp/tmp.VOXNzLNI3Y /tmp/tmp.PLvAGeymfF ++ return 0 + [[ error == \r\e\a\d\y ]] + let retry+=1 + '[' 6 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.d9rW2OJB11 +++ mktemp ++ local LAST_ERR=/tmp/tmp.hDosjq0Swi ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.d9rW2OJB11 ++ cat /tmp/tmp.hDosjq0Swi ++ rm /tmp/tmp.d9rW2OJB11 /tmp/tmp.hDosjq0Swi ++ return 0 + [[ error == \r\e\a\d\y ]] + let retry+=1 + '[' 7 -ge 60 ']' + echo -n . 
.+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.zR1uJBMdxl +++ mktemp ++ local LAST_ERR=/tmp/tmp.13EcURWnYR ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.zR1uJBMdxl ++ cat /tmp/tmp.13EcURWnYR ++ rm /tmp/tmp.zR1uJBMdxl /tmp/tmp.13EcURWnYR ++ return 0 + [[ error == \r\e\a\d\y ]] + let retry+=1 + '[' 8 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ddSfsyHl6s +++ mktemp ++ local LAST_ERR=/tmp/tmp.cbRqtSWxcC ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.ddSfsyHl6s ++ cat /tmp/tmp.cbRqtSWxcC ++ rm /tmp/tmp.ddSfsyHl6s /tmp/tmp.cbRqtSWxcC ++ return 0 + [[ error == \r\e\a\d\y ]] + let retry+=1 + '[' 9 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.gR3Et9dAUn +++ mktemp ++ local LAST_ERR=/tmp/tmp.kIgg0gm5GK ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.gR3Et9dAUn ++ cat /tmp/tmp.kIgg0gm5GK ++ rm /tmp/tmp.gR3Et9dAUn /tmp/tmp.kIgg0gm5GK ++ return 0 + [[ error == \r\e\a\d\y ]] + let retry+=1 + '[' 10 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.DVlvvxH1ZV +++ mktemp ++ local LAST_ERR=/tmp/tmp.g1CemB4FTb ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.DVlvvxH1ZV ++ cat /tmp/tmp.g1CemB4FTb ++ rm /tmp/tmp.DVlvvxH1ZV /tmp/tmp.g1CemB4FTb ++ return 0 + [[ paused == \r\e\a\d\y ]] + let retry+=1 + '[' 11 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.lTzYWNzK8V +++ mktemp ++ local LAST_ERR=/tmp/tmp.7q7sRagqvx ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.lTzYWNzK8V ++ cat /tmp/tmp.7q7sRagqvx ++ rm /tmp/tmp.lTzYWNzK8V /tmp/tmp.7q7sRagqvx ++ return 0 + [[ paused == \r\e\a\d\y ]] + let retry+=1 + '[' 12 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.3EaOep1ApF +++ mktemp ++ local LAST_ERR=/tmp/tmp.6u545c0J2P ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.3EaOep1ApF ++ cat /tmp/tmp.6u545c0J2P ++ rm /tmp/tmp.3EaOep1ApF /tmp/tmp.6u545c0J2P ++ return 0 + [[ paused == \r\e\a\d\y ]] + let retry+=1 + '[' 13 -ge 60 ']' + echo -n . 
.+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.l0bGyKoB3m +++ mktemp ++ local LAST_ERR=/tmp/tmp.HQgaJfdAS6 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.l0bGyKoB3m ++ cat /tmp/tmp.HQgaJfdAS6 ++ rm /tmp/tmp.l0bGyKoB3m /tmp/tmp.HQgaJfdAS6 ++ return 0 + [[ paused == \r\e\a\d\y ]] + let retry+=1 + '[' 14 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.73rEDOwHZb +++ mktemp ++ local LAST_ERR=/tmp/tmp.eiZ6S6WGk3 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.73rEDOwHZb ++ cat /tmp/tmp.eiZ6S6WGk3 ++ rm /tmp/tmp.73rEDOwHZb /tmp/tmp.eiZ6S6WGk3 ++ return 0 + [[ paused == \r\e\a\d\y ]] + let retry+=1 + '[' 15 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.rXhCD8Qtzo +++ mktemp ++ local LAST_ERR=/tmp/tmp.WhDhYRAzA7 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.rXhCD8Qtzo ++ cat /tmp/tmp.WhDhYRAzA7 ++ rm /tmp/tmp.rXhCD8Qtzo /tmp/tmp.WhDhYRAzA7 ++ return 0 + [[ paused == \r\e\a\d\y ]] + let retry+=1 + '[' 16 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.cCV2XBV73Y +++ mktemp ++ local LAST_ERR=/tmp/tmp.rfqIz6ftSF ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.cCV2XBV73Y ++ cat /tmp/tmp.rfqIz6ftSF ++ rm /tmp/tmp.cCV2XBV73Y /tmp/tmp.rfqIz6ftSF ++ return 0 + [[ paused == \r\e\a\d\y ]] + let retry+=1 + '[' 17 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.kS3jRLDUqG +++ mktemp ++ local LAST_ERR=/tmp/tmp.mMz662nWqk ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.kS3jRLDUqG ++ cat /tmp/tmp.mMz662nWqk ++ rm /tmp/tmp.kS3jRLDUqG /tmp/tmp.mMz662nWqk ++ return 0 + [[ paused == \r\e\a\d\y ]] + let retry+=1 + '[' 18 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.cHyy6rucix +++ mktemp ++ local LAST_ERR=/tmp/tmp.gRIjhmWLIf ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.cHyy6rucix ++ cat /tmp/tmp.gRIjhmWLIf ++ rm /tmp/tmp.cHyy6rucix /tmp/tmp.gRIjhmWLIf ++ return 0 + [[ paused == \r\e\a\d\y ]] + let retry+=1 + '[' 19 -ge 60 ']' + echo -n . 
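# Observed in this run: the reported state walks through stopping -> error ->
# paused -> initializing before it finally reads ready further below. The
# intermediate "error" readings are transient here, and the loop simply keeps
# re-running the same probe:
kubectl get psmdb some-name -o 'jsonpath={.status.state}'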
.+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.E9jlxtlbnq +++ mktemp ++ local LAST_ERR=/tmp/tmp.EPmZofeHUM ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.E9jlxtlbnq ++ cat /tmp/tmp.EPmZofeHUM ++ rm /tmp/tmp.E9jlxtlbnq /tmp/tmp.EPmZofeHUM ++ return 0 + [[ paused == \r\e\a\d\y ]] + let retry+=1 + '[' 20 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.cdmcsZXE7g +++ mktemp ++ local LAST_ERR=/tmp/tmp.F2BL0eUrRV ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.cdmcsZXE7g ++ cat /tmp/tmp.F2BL0eUrRV ++ rm /tmp/tmp.cdmcsZXE7g /tmp/tmp.F2BL0eUrRV ++ return 0 + [[ paused == \r\e\a\d\y ]] + let retry+=1 + '[' 21 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ts250JF0Ch +++ mktemp ++ local LAST_ERR=/tmp/tmp.KpuSN9nGN3 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.ts250JF0Ch ++ cat /tmp/tmp.KpuSN9nGN3 ++ rm /tmp/tmp.ts250JF0Ch /tmp/tmp.KpuSN9nGN3 ++ return 0 + [[ paused == \r\e\a\d\y ]] + let retry+=1 + '[' 22 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.nyMLVhA7mp +++ mktemp ++ local LAST_ERR=/tmp/tmp.Yb45aFisH9 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.nyMLVhA7mp ++ cat /tmp/tmp.Yb45aFisH9 ++ rm /tmp/tmp.nyMLVhA7mp /tmp/tmp.Yb45aFisH9 ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 23 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ueAhBRdfXQ +++ mktemp ++ local LAST_ERR=/tmp/tmp.ZZBuM5bSSr ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.ueAhBRdfXQ ++ cat /tmp/tmp.ZZBuM5bSSr ++ rm /tmp/tmp.ueAhBRdfXQ /tmp/tmp.ZZBuM5bSSr ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 24 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.XHQOTNuC7W +++ mktemp ++ local LAST_ERR=/tmp/tmp.F0QAW52py7 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.XHQOTNuC7W ++ cat /tmp/tmp.F0QAW52py7 ++ rm /tmp/tmp.XHQOTNuC7W /tmp/tmp.F0QAW52py7 ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 25 -ge 60 ']' + echo -n . 
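# Once the probe just below returns ready, compare_kubectl takes over: it dumps
# the statefulset, scrubs environment-specific fields with the long yq filter
# shown in the trace, and diffs the result against a checked-in expectation. A
# shortened sketch of that normalize-then-diff step (this filter keeps only
# three of the many del(...) clauses the real test applies):
kubectl get -o yaml statefulset/some-name-rs0 \
    | yq eval 'del(.metadata.managedFields) | del(.metadata.resourceVersion) | del(.status)' - \
    > /tmp/statefulset_some-name-rs0.yml
diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1965/e2e-tests/expose-sharded/compare/statefulset_some-name-rs0-sharding-disabled.yml \
    /tmp/statefulset_some-name-rs0.yml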
.+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.1ACXKCeaox +++ mktemp ++ local LAST_ERR=/tmp/tmp.udxg6XhMSN ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.1ACXKCeaox ++ cat /tmp/tmp.udxg6XhMSN ++ rm /tmp/tmp.1ACXKCeaox /tmp/tmp.udxg6XhMSN ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 26 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.r2e0g4xyei +++ mktemp ++ local LAST_ERR=/tmp/tmp.FW9cyZzYqI ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.r2e0g4xyei ++ cat /tmp/tmp.FW9cyZzYqI ++ rm /tmp/tmp.r2e0g4xyei /tmp/tmp.FW9cyZzYqI ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 27 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ODCWytA4nx +++ mktemp ++ local LAST_ERR=/tmp/tmp.dmpdSGUHj0 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.ODCWytA4nx ++ cat /tmp/tmp.dmpdSGUHj0 ++ rm /tmp/tmp.ODCWytA4nx /tmp/tmp.dmpdSGUHj0 ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 28 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.VGGZsD6c29 +++ mktemp ++ local LAST_ERR=/tmp/tmp.awd306FRye ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.VGGZsD6c29 ++ cat /tmp/tmp.awd306FRye ++ rm /tmp/tmp.VGGZsD6c29 /tmp/tmp.awd306FRye ++ return 0 + [[ ready == \r\e\a\d\y ]] + echo + compare_kubectl statefulset/some-name-rs0 -sharding-disabled + local resource=statefulset/some-name-rs0 + local postfix=-sharding-disabled + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1965/e2e-tests/expose-sharded/compare/statefulset_some-name-rs0-sharding-disabled.yml + local new_result=/tmp/tmp.TQYCeunbst/statefulset_some-name-rs0.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1965/e2e-tests/expose-sharded/compare/statefulset_some-name-rs0-sharding-disabled-oc.yml ']' + kubectl_bin get -o yaml statefulset/some-name-rs0 + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. 
| select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("expose-sharded-3967", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. == "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.6yPTymbbyc ++ mktemp + local LAST_ERR=/tmp/tmp.b2OuRnP3ZV + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get -o yaml statefulset/some-name-rs0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.6yPTymbbyc + cat /tmp/tmp.b2OuRnP3ZV + rm /tmp/tmp.6yPTymbbyc /tmp/tmp.b2OuRnP3ZV + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.TQYCeunbst/statefulset_some-name-rs0.yml + version_gt 1.22 ++ echo '1.30 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.TQYCeunbst/statefulset_some-name-rs0.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.TQYCeunbst/statefulset_some-name-rs0.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1965/e2e-tests/expose-sharded/compare/statefulset_some-name-rs0-sharding-disabled.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1965/e2e-tests/expose-sharded/compare/statefulset_some-name-rs0-sharding-disabled.yml /tmp/tmp.TQYCeunbst/statefulset_some-name-rs0.yml ++ kubectl_bin get sts -o yaml +++ mktemp ++ local LAST_OUT=/tmp/tmp.P9Ve6ML03k +++ mktemp ++ yq '.items | length' ++ local LAST_ERR=/tmp/tmp.eg6oAM6aR2 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get sts -o yaml ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.P9Ve6ML03k ++ cat /tmp/tmp.eg6oAM6aR2 ++ rm /tmp/tmp.P9Ve6ML03k /tmp/tmp.eg6oAM6aR2 ++ return 0 + [[ 1 != 1 ]] + desc 'enabling sharding' + set +o xtrace ----------------------------------------------------------------------------------- enabling sharding ----------------------------------------------------------------------------------- + kubectl_bin patch psmdb some-name --type=json '-p=[{"op": "replace", "path": "/spec/sharding/enabled", "value": true}]' ++ mktemp + 
local LAST_OUT=/tmp/tmp.vfDJC1Qzd1 ++ mktemp + local LAST_ERR=/tmp/tmp.pwERhXCOWk + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl patch psmdb some-name --type=json '-p=[{"op": "replace", "path": "/spec/sharding/enabled", "value": true}]' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.vfDJC1Qzd1 perconaservermongodb.psmdb.percona.com/some-name patched + cat /tmp/tmp.pwERhXCOWk + rm /tmp/tmp.vfDJC1Qzd1 /tmp/tmp.pwERhXCOWk + return 0 + sleep 10 + wait_cluster_consistency some-name 60 + local cluster_name=some-name + local wait_time=60 + retry=0 + sleep 7 + echo -n 'waiting for cluster readyness' waiting for cluster readyness++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.y6hpnqzKIu +++ mktemp ++ local LAST_ERR=/tmp/tmp.H0YHH1aB5A ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.y6hpnqzKIu ++ cat /tmp/tmp.H0YHH1aB5A ++ rm /tmp/tmp.y6hpnqzKIu /tmp/tmp.H0YHH1aB5A ++ return 0 + [[ stopping == \r\e\a\d\y ]] + let retry+=1 + '[' 1 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.4A8QqUeLe0 +++ mktemp ++ local LAST_ERR=/tmp/tmp.okGWnZLUD0 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.4A8QqUeLe0 ++ cat /tmp/tmp.okGWnZLUD0 ++ rm /tmp/tmp.4A8QqUeLe0 /tmp/tmp.okGWnZLUD0 ++ return 0 + [[ stopping == \r\e\a\d\y ]] + let retry+=1 + '[' 2 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.3oUjgBuaqD +++ mktemp ++ local LAST_ERR=/tmp/tmp.HdjV1fIIqE ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.3oUjgBuaqD ++ cat /tmp/tmp.HdjV1fIIqE ++ rm /tmp/tmp.3oUjgBuaqD /tmp/tmp.HdjV1fIIqE ++ return 0 + [[ stopping == \r\e\a\d\y ]] + let retry+=1 + '[' 3 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.SDVJTedZmu +++ mktemp ++ local LAST_ERR=/tmp/tmp.1BqiFBIT9d ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.SDVJTedZmu ++ cat /tmp/tmp.1BqiFBIT9d ++ rm /tmp/tmp.SDVJTedZmu /tmp/tmp.1BqiFBIT9d ++ return 0 + [[ stopping == \r\e\a\d\y ]] + let retry+=1 + '[' 4 -ge 60 ']' + echo -n . 
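# The "enabling sharding" step at the top of this block reduces to a single
# JSON patch against the custom resource; the operator handles the rest of the
# reconfiguration. Standalone equivalent, taken directly from this trace:
kubectl patch psmdb some-name --type=json \
    -p='[{"op": "replace", "path": "/spec/sharding/enabled", "value": true}]'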
.+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.mLbQNEPQvB +++ mktemp ++ local LAST_ERR=/tmp/tmp.F8UyxZAZUm ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.mLbQNEPQvB ++ cat /tmp/tmp.F8UyxZAZUm ++ rm /tmp/tmp.mLbQNEPQvB /tmp/tmp.F8UyxZAZUm ++ return 0 + [[ stopping == \r\e\a\d\y ]] + let retry+=1 + '[' 5 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.LQCDIkjfHc +++ mktemp ++ local LAST_ERR=/tmp/tmp.WQIssOLluR ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.LQCDIkjfHc ++ cat /tmp/tmp.WQIssOLluR ++ rm /tmp/tmp.LQCDIkjfHc /tmp/tmp.WQIssOLluR ++ return 0 + [[ stopping == \r\e\a\d\y ]] + let retry+=1 + '[' 6 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.QXQta8yoac +++ mktemp ++ local LAST_ERR=/tmp/tmp.Pz2aML0305 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.QXQta8yoac ++ cat /tmp/tmp.Pz2aML0305 ++ rm /tmp/tmp.QXQta8yoac /tmp/tmp.Pz2aML0305 ++ return 0 + [[ error == \r\e\a\d\y ]] + let retry+=1 + '[' 7 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.23aNGOpwDD +++ mktemp ++ local LAST_ERR=/tmp/tmp.U9V0iTfFRL ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.23aNGOpwDD ++ cat /tmp/tmp.U9V0iTfFRL ++ rm /tmp/tmp.23aNGOpwDD /tmp/tmp.U9V0iTfFRL ++ return 0 + [[ error == \r\e\a\d\y ]] + let retry+=1 + '[' 8 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.fzxJ7MZogW +++ mktemp ++ local LAST_ERR=/tmp/tmp.UZwrcX0Dvr ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.fzxJ7MZogW ++ cat /tmp/tmp.UZwrcX0Dvr ++ rm /tmp/tmp.fzxJ7MZogW /tmp/tmp.UZwrcX0Dvr ++ return 0 + [[ error == \r\e\a\d\y ]] + let retry+=1 + '[' 9 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.gv2eCeOUYY +++ mktemp ++ local LAST_ERR=/tmp/tmp.fUBAZxAyAx ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.gv2eCeOUYY ++ cat /tmp/tmp.fUBAZxAyAx ++ rm /tmp/tmp.gv2eCeOUYY /tmp/tmp.fUBAZxAyAx ++ return 0 + [[ error == \r\e\a\d\y ]] + let retry+=1 + '[' 10 -ge 60 ']' + echo -n . 
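# When following a reconcile like this by hand rather than in a test loop, the
# same status can be streamed with a watch (illustrative one-liner, not part of
# the test framework):
kubectl get psmdb some-name -w -o custom-columns=NAME:.metadata.name,STATE:.status.state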
.+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.IALCdg8AsF +++ mktemp ++ local LAST_ERR=/tmp/tmp.9OmUrKxJ2X ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.IALCdg8AsF ++ cat /tmp/tmp.9OmUrKxJ2X ++ rm /tmp/tmp.IALCdg8AsF /tmp/tmp.9OmUrKxJ2X ++ return 0 + [[ stopping == \r\e\a\d\y ]] + let retry+=1 + '[' 11 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.GG5q6zzPSK +++ mktemp ++ local LAST_ERR=/tmp/tmp.dPWdByaWRl ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.GG5q6zzPSK ++ cat /tmp/tmp.dPWdByaWRl ++ rm /tmp/tmp.GG5q6zzPSK /tmp/tmp.dPWdByaWRl ++ return 0 + [[ stopping == \r\e\a\d\y ]] + let retry+=1 + '[' 12 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ResXvm60s2 +++ mktemp ++ local LAST_ERR=/tmp/tmp.6vjH65p7pG ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.ResXvm60s2 ++ cat /tmp/tmp.6vjH65p7pG ++ rm /tmp/tmp.ResXvm60s2 /tmp/tmp.6vjH65p7pG ++ return 0 + [[ stopping == \r\e\a\d\y ]] + let retry+=1 + '[' 13 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.5zUDum9oOQ +++ mktemp ++ local LAST_ERR=/tmp/tmp.O25eAc1FKO ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.5zUDum9oOQ ++ cat /tmp/tmp.O25eAc1FKO ++ rm /tmp/tmp.5zUDum9oOQ /tmp/tmp.O25eAc1FKO ++ return 0 + [[ stopping == \r\e\a\d\y ]] + let retry+=1 + '[' 14 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.plCVfz5EVY +++ mktemp ++ local LAST_ERR=/tmp/tmp.9P00KYusmb ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.plCVfz5EVY ++ cat /tmp/tmp.9P00KYusmb ++ rm /tmp/tmp.plCVfz5EVY /tmp/tmp.9P00KYusmb ++ return 0 + [[ stopping == \r\e\a\d\y ]] + let retry+=1 + '[' 15 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.PY653hdwMF +++ mktemp ++ local LAST_ERR=/tmp/tmp.5esmT8RTxd ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.PY653hdwMF ++ cat /tmp/tmp.5esmT8RTxd ++ rm /tmp/tmp.PY653hdwMF /tmp/tmp.5esmT8RTxd ++ return 0 + [[ stopping == \r\e\a\d\y ]] + let retry+=1 + '[' 16 -ge 60 ']' + echo -n . 
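# The data checks later in this log (the run_mongos expansions) all reduce to
# piping a script into the mongo shell inside the psmdb-client pod. A sketch of
# that pattern, lifted from the exec lines further down; the pod name is
# whatever the psmdb-client deployment produced:
client=$(kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}')
kubectl exec "$client" -- bash -c \
    'printf "use myApp\n db.test.insert({ x: 100500 })\n" | mongo mongodb://myApp:myPass@some-name-mongos.expose-sharded-3967.svc.cluster.local:27017/admin'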
.+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.VmXH1FyMhD +++ mktemp ++ local LAST_ERR=/tmp/tmp.3SJ5xHx0sI ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.VmXH1FyMhD ++ cat /tmp/tmp.3SJ5xHx0sI ++ rm /tmp/tmp.VmXH1FyMhD /tmp/tmp.3SJ5xHx0sI ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 17 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.luKncAVLK0 +++ mktemp ++ local LAST_ERR=/tmp/tmp.Mp1pk4g0dy ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.luKncAVLK0 ++ cat /tmp/tmp.Mp1pk4g0dy ++ rm /tmp/tmp.luKncAVLK0 /tmp/tmp.Mp1pk4g0dy ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 18 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.33jqMXETNb +++ mktemp ++ local LAST_ERR=/tmp/tmp.FG82j8zebr ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.33jqMXETNb ++ cat /tmp/tmp.FG82j8zebr ++ rm /tmp/tmp.33jqMXETNb /tmp/tmp.FG82j8zebr ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 19 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.0I7lsMxRbp +++ mktemp ++ local LAST_ERR=/tmp/tmp.7MOUgOR0SW ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.0I7lsMxRbp ++ cat /tmp/tmp.7MOUgOR0SW ++ rm /tmp/tmp.0I7lsMxRbp /tmp/tmp.7MOUgOR0SW ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 20 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.GtKk94qWri +++ mktemp ++ local LAST_ERR=/tmp/tmp.0BeZtTjQyS ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.GtKk94qWri ++ cat /tmp/tmp.0BeZtTjQyS ++ rm /tmp/tmp.GtKk94qWri /tmp/tmp.0BeZtTjQyS ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 21 -ge 60 ']' + echo -n . 
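# The "Unexposed -> Exposed, ClusterIP" step further below is, like the
# sharding toggle, one JSON patch covering the three expose sections (replset,
# mongos, config servers). Copied from the trace that follows:
kubectl patch psmdb some-name --type=json --patch '[
  { "op": "replace", "path": "/spec/replsets/0/expose", "value": { "enabled": true, "type": "ClusterIP" } },
  { "op": "replace", "path": "/spec/sharding/mongos/expose", "value": { "type": "ClusterIP" } },
  { "op": "replace", "path": "/spec/sharding/configsvrReplSet/expose", "value": { "enabled": true, "type": "ClusterIP" } }
]'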
.+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Ut1e46IEao +++ mktemp ++ local LAST_ERR=/tmp/tmp.LWTe54mObC ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Ut1e46IEao ++ cat /tmp/tmp.LWTe54mObC ++ rm /tmp/tmp.Ut1e46IEao /tmp/tmp.LWTe54mObC ++ return 0 + [[ ready == \r\e\a\d\y ]] + echo + compare_kubectl statefulset/some-name-rs0 -sharding-enabled + local resource=statefulset/some-name-rs0 + local postfix=-sharding-enabled + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1965/e2e-tests/expose-sharded/compare/statefulset_some-name-rs0-sharding-enabled.yml + local new_result=/tmp/tmp.TQYCeunbst/statefulset_some-name-rs0.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1965/e2e-tests/expose-sharded/compare/statefulset_some-name-rs0-sharding-enabled-oc.yml ']' + kubectl_bin get -o yaml statefulset/some-name-rs0 ++ mktemp + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("expose-sharded-3967", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. 
== "batch/v1beta1")) = "batch/v1" ' - + local LAST_OUT=/tmp/tmp.b5tuD3UXsL ++ mktemp + local LAST_ERR=/tmp/tmp.z3yoCubINt + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get -o yaml statefulset/some-name-rs0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.b5tuD3UXsL + cat /tmp/tmp.z3yoCubINt + rm /tmp/tmp.b5tuD3UXsL /tmp/tmp.z3yoCubINt + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.TQYCeunbst/statefulset_some-name-rs0.yml + version_gt 1.22 ++ echo '1.30 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.TQYCeunbst/statefulset_some-name-rs0.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.TQYCeunbst/statefulset_some-name-rs0.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1965/e2e-tests/expose-sharded/compare/statefulset_some-name-rs0-sharding-enabled.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1965/e2e-tests/expose-sharded/compare/statefulset_some-name-rs0-sharding-enabled.yml /tmp/tmp.TQYCeunbst/statefulset_some-name-rs0.yml + compare_kubectl statefulset/some-name-cfg + local resource=statefulset/some-name-cfg + local postfix= + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1965/e2e-tests/expose-sharded/compare/statefulset_some-name-cfg.yml + local new_result=/tmp/tmp.TQYCeunbst/statefulset_some-name-cfg.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1965/e2e-tests/expose-sharded/compare/statefulset_some-name-cfg-oc.yml ']' + kubectl_bin get -o yaml statefulset/some-name-cfg ++ mktemp + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. 
| select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("expose-sharded-3967", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. == "batch/v1beta1")) = "batch/v1" ' - + local LAST_OUT=/tmp/tmp.DobkoLVO8o ++ mktemp + local LAST_ERR=/tmp/tmp.ZHFIzrfB1g + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get -o yaml statefulset/some-name-cfg + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.DobkoLVO8o + cat /tmp/tmp.ZHFIzrfB1g + rm /tmp/tmp.DobkoLVO8o /tmp/tmp.ZHFIzrfB1g + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.TQYCeunbst/statefulset_some-name-cfg.yml + version_gt 1.22 ++ echo '1.30 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.TQYCeunbst/statefulset_some-name-cfg.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.TQYCeunbst/statefulset_some-name-cfg.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1965/e2e-tests/expose-sharded/compare/statefulset_some-name-cfg.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1965/e2e-tests/expose-sharded/compare/statefulset_some-name-cfg.yml /tmp/tmp.TQYCeunbst/statefulset_some-name-cfg.yml + compare_kubectl statefulset/some-name-mongos '' + local resource=statefulset/some-name-mongos + local postfix= + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1965/e2e-tests/expose-sharded/compare/statefulset_some-name-mongos.yml + local new_result=/tmp/tmp.TQYCeunbst/statefulset_some-name-mongos.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1965/e2e-tests/expose-sharded/compare/statefulset_some-name-mongos-oc.yml ']' + kubectl_bin get -o yaml statefulset/some-name-mongos ++ mktemp + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. 
| select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("expose-sharded-3967", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. == "batch/v1beta1")) = "batch/v1" ' - + local LAST_OUT=/tmp/tmp.8pc2Mr43wu ++ mktemp + local LAST_ERR=/tmp/tmp.LmttGXLVq8 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get -o yaml statefulset/some-name-mongos + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.8pc2Mr43wu + cat /tmp/tmp.LmttGXLVq8 + rm /tmp/tmp.8pc2Mr43wu /tmp/tmp.LmttGXLVq8 + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.TQYCeunbst/statefulset_some-name-mongos.yml + version_gt 1.22 ++ bc -l ++ echo '1.30 >= 1.22' + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.TQYCeunbst/statefulset_some-name-mongos.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.TQYCeunbst/statefulset_some-name-mongos.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1965/e2e-tests/expose-sharded/compare/statefulset_some-name-mongos.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1965/e2e-tests/expose-sharded/compare/statefulset_some-name-mongos.yml /tmp/tmp.TQYCeunbst/statefulset_some-name-mongos.yml + desc 'write data, read from all' + set +o xtrace ----------------------------------------------------------------------------------- write data, read from all ----------------------------------------------------------------------------------- + run_mongos 'db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})' userAdmin:userAdmin123456@some-name-mongos.expose-sharded-3967 + local 'command=db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})' + local uri=userAdmin:userAdmin123456@some-name-mongos.expose-sharded-3967 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local port=27017 + local mongo_bin=mongo ++ echo .svc.cluster.local ++ awk -F: '{print $2}' + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.5S9uPDN7G5 +++ mktemp ++ local LAST_ERR=/tmp/tmp.ivteR59ul8 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.5S9uPDN7G5 ++ cat /tmp/tmp.ivteR59ul8 ++ rm /tmp/tmp.5S9uPDN7G5 /tmp/tmp.ivteR59ul8 ++ return 0 + local client_container=psmdb-client-66f577db5f-bpv5m + kubectl_bin exec psmdb-client-66f577db5f-bpv5m -- bash -c 'printf 
'\''db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})\n'\'' | mongo mongodb://userAdmin:userAdmin123456@some-name-mongos.expose-sharded-3967.svc.cluster.local:27017/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.vMfMdj2Fww ++ mktemp + local LAST_ERR=/tmp/tmp.HuaYvK2Vsf + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-66f577db5f-bpv5m -- bash -c 'printf '\''db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})\n'\'' | mongo mongodb://userAdmin:userAdmin123456@some-name-mongos.expose-sharded-3967.svc.cluster.local:27017/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.vMfMdj2Fww Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-mongos.expose-sharded-3967.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb Implicit session: session { "id" : UUID("4bbaa9bb-023e-4f6b-9ea7-e961e7cf10ab") } Percona Server for MongoDB server version: v7.0.18-11 WARNING: shell and server versions do not match Successfully added user: { "user" : "myApp", "roles" : [ { "db" : "myApp", "role" : "readWrite" } ] } bye + cat /tmp/tmp.HuaYvK2Vsf + rm /tmp/tmp.vMfMdj2Fww /tmp/tmp.HuaYvK2Vsf + return 0 + run_mongo 'db.createUser({user: "myApp", pwd: "myPass", roles: [{ db: "myApp", role: "readWrite" }]})' userAdmin:userAdmin123456@some-name-rs0-0.some-name-rs0.expose-sharded-3967 mongodb + local 'command=db.createUser({user: "myApp", pwd: "myPass", roles: [{ db: "myApp", role: "readWrite" }]})' + local uri=userAdmin:userAdmin123456@some-name-rs0-0.some-name-rs0.expose-sharded-3967 + local driver=mongodb + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.innARbRtAl +++ mktemp ++ local LAST_ERR=/tmp/tmp.PWApDiwuUZ ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.innARbRtAl ++ cat /tmp/tmp.PWApDiwuUZ ++ rm /tmp/tmp.innARbRtAl /tmp/tmp.PWApDiwuUZ ++ return 0 + local client_container=psmdb-client-66f577db5f-bpv5m + local mongo_flag= + [[ userAdmin:userAdmin123456@some-name-rs0-0.some-name-rs0.expose-sharded-3967 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-66f577db5f-bpv5m -- bash -c 'printf '\''db.createUser({user: "myApp", pwd: "myPass", roles: [{ db: "myApp", role: "readWrite" }]})\n'\'' | mongo mongodb://userAdmin:userAdmin123456@some-name-rs0-0.some-name-rs0.expose-sharded-3967.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.i4TXGsRr3K ++ mktemp + local LAST_ERR=/tmp/tmp.vdAVbQ56JK + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-66f577db5f-bpv5m -- bash -c 'printf '\''db.createUser({user: "myApp", pwd: "myPass", roles: [{ db: "myApp", role: "readWrite" }]})\n'\'' | mongo mongodb://userAdmin:userAdmin123456@some-name-rs0-0.some-name-rs0.expose-sharded-3967.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.i4TXGsRr3K Percona Server for MongoDB shell version v4.4.29-28 connecting to: 
mongodb://some-name-rs0-0.some-name-rs0.expose-sharded-3967.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("0f86d4bb-4ea0-4081-951b-b6ff5832e358") } Percona Server for MongoDB server version: v7.0.18-11 WARNING: shell and server versions do not match Successfully added user: { "user" : "myApp", "roles" : [ { "db" : "myApp", "role" : "readWrite" } ] } bye + cat /tmp/tmp.vdAVbQ56JK + rm /tmp/tmp.i4TXGsRr3K /tmp/tmp.vdAVbQ56JK + return 0 + run_mongos 'sh.enableSharding("myApp","rs0")' clusterAdmin:clusterAdmin123456@some-name-mongos.expose-sharded-3967 + local 'command=sh.enableSharding("myApp","rs0")' + local uri=clusterAdmin:clusterAdmin123456@some-name-mongos.expose-sharded-3967 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local port=27017 + local mongo_bin=mongo ++ echo .svc.cluster.local ++ awk -F: '{print $2}' + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.XJBYNI7mbg +++ mktemp ++ local LAST_ERR=/tmp/tmp.WCo3VQQOmj ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.XJBYNI7mbg ++ cat /tmp/tmp.WCo3VQQOmj ++ rm /tmp/tmp.XJBYNI7mbg /tmp/tmp.WCo3VQQOmj ++ return 0 + local client_container=psmdb-client-66f577db5f-bpv5m + kubectl_bin exec psmdb-client-66f577db5f-bpv5m -- bash -c 'printf '\''sh.enableSharding("myApp","rs0")\n'\'' | mongo mongodb://clusterAdmin:clusterAdmin123456@some-name-mongos.expose-sharded-3967.svc.cluster.local:27017/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.Z92Ids4b30 ++ mktemp + local LAST_ERR=/tmp/tmp.0fMK1L3Bp1 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-66f577db5f-bpv5m -- bash -c 'printf '\''sh.enableSharding("myApp","rs0")\n'\'' | mongo mongodb://clusterAdmin:clusterAdmin123456@some-name-mongos.expose-sharded-3967.svc.cluster.local:27017/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Z92Ids4b30 Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-mongos.expose-sharded-3967.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb Implicit session: session { "id" : UUID("d5605199-62d3-4367-921d-17bc9ee0392c") } Percona Server for MongoDB server version: v7.0.18-11 WARNING: shell and server versions do not match { "ok" : 1, "$clusterTime" : { "clusterTime" : Timestamp(1749437935, 9), "signature" : { "hash" : BinData(0,"sGiaYiwC+3OB3CntN3eZh1XbO4A="), "keyId" : NumberLong("7513775551815876609") } }, "operationTime" : Timestamp(1749437935, 3) } bye + cat /tmp/tmp.0fMK1L3Bp1 + rm /tmp/tmp.Z92Ids4b30 /tmp/tmp.0fMK1L3Bp1 + return 0 + run_mongos 'use myApp\n db.test.insert({ x: 100500 })' myApp:myPass@some-name-mongos.expose-sharded-3967 + local 'command=use myApp\n db.test.insert({ x: 100500 })' + local uri=myApp:myPass@some-name-mongos.expose-sharded-3967 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local port=27017 + local mongo_bin=mongo ++ awk -F: '{print $2}' ++ echo .svc.cluster.local + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 ++ kubectl_bin get pods --selector=name=psmdb-client -o 
'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.BjIs5vpdqy +++ mktemp ++ local LAST_ERR=/tmp/tmp.CZNxxvtwZy ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.BjIs5vpdqy ++ cat /tmp/tmp.CZNxxvtwZy ++ rm /tmp/tmp.BjIs5vpdqy /tmp/tmp.CZNxxvtwZy ++ return 0 + local client_container=psmdb-client-66f577db5f-bpv5m + kubectl_bin exec psmdb-client-66f577db5f-bpv5m -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.expose-sharded-3967.svc.cluster.local:27017/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.NqocTCYwha ++ mktemp + local LAST_ERR=/tmp/tmp.pmX10vl0TZ + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-66f577db5f-bpv5m -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.expose-sharded-3967.svc.cluster.local:27017/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.NqocTCYwha Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-mongos.expose-sharded-3967.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb Implicit session: session { "id" : UUID("7fe4867f-9bff-49e3-af8d-95dd05492132") } Percona Server for MongoDB server version: v7.0.18-11 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.pmX10vl0TZ + rm /tmp/tmp.NqocTCYwha /tmp/tmp.pmX10vl0TZ + return 0 + compare_mongos_cmd find myApp:myPass@some-name-mongos.expose-sharded-3967 + local command=find + local uri=myApp:myPass@some-name-mongos.expose-sharded-3967 + local postfix= + local suffix= + local database=myApp + local collection=test + local port=27017 + run_mongos 'use myApp\n db.test.find()' myApp:myPass@some-name-mongos.expose-sharded-3967 mongodb '' '' 27017 + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-mongos.expose-sharded-3967 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local port=27017 + local mongo_bin=mongo + egrep -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ echo .svc.cluster.local ++ awk -F: '{print $2}' + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.bGQGdxifF1 +++ mktemp ++ local LAST_ERR=/tmp/tmp.7sDG25Xeqj ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.bGQGdxifF1 ++ cat /tmp/tmp.7sDG25Xeqj ++ rm /tmp/tmp.bGQGdxifF1 /tmp/tmp.7sDG25Xeqj ++ return 0 + local client_container=psmdb-client-66f577db5f-bpv5m + kubectl_bin exec psmdb-client-66f577db5f-bpv5m -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.expose-sharded-3967.svc.cluster.local:27017/admin ' ++ mktemp 
+ local LAST_OUT=/tmp/tmp.OTW9svOQrC ++ mktemp + local LAST_ERR=/tmp/tmp.WfyFZYpbi4 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-66f577db5f-bpv5m -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.expose-sharded-3967.svc.cluster.local:27017/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.OTW9svOQrC + cat /tmp/tmp.WfyFZYpbi4 + rm /tmp/tmp.OTW9svOQrC /tmp/tmp.WfyFZYpbi4 + return 0 + [[ 0 -eq 0 ]] + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1965/e2e-tests/expose-sharded/compare/find.json /tmp/tmp.TQYCeunbst/find + desc 'Unexposed -> Exposed, ClusterIP' + set +o xtrace ----------------------------------------------------------------------------------- Unexposed -> Exposed, ClusterIP ----------------------------------------------------------------------------------- + expose_cluster ClusterIP + expose_type=ClusterIP + expose_status=true + kubectl_bin patch psmdb some-name --type=json --patch '[ { "op": "replace", "path": "/spec/replsets/0/expose", "value": { "enabled": true, "type" : "ClusterIP" } }, { "op": "replace", "path": "/spec/sharding/mongos/expose", "value": { "type" : "ClusterIP" } }, { "op": "replace", "path": "/spec/sharding/configsvrReplSet/expose", "value": { "enabled": true, "type" : "ClusterIP" } }]' ++ mktemp + local LAST_OUT=/tmp/tmp.qfCULTZ2bC ++ mktemp + local LAST_ERR=/tmp/tmp.TSBhOc4bZd + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl patch psmdb some-name --type=json --patch '[ { "op": "replace", "path": "/spec/replsets/0/expose", "value": { "enabled": true, "type" : "ClusterIP" } }, { "op": "replace", "path": "/spec/sharding/mongos/expose", "value": { "type" : "ClusterIP" } }, { "op": "replace", "path": "/spec/sharding/configsvrReplSet/expose", "value": { "enabled": true, "type" : "ClusterIP" } }]' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.qfCULTZ2bC perconaservermongodb.psmdb.percona.com/some-name patched + cat /tmp/tmp.TSBhOc4bZd + rm /tmp/tmp.qfCULTZ2bC /tmp/tmp.TSBhOc4bZd + return 0 + wait_for_running some-name-rs0 3 + local name=some-name-rs0 + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=some-name ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + [[ 0 -eq 2 ]] + wait_pod some-name-rs0-0 + local pod=some-name-rs0-0 + set +o xtrace waiting for pod/some-name-rs0-0 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 1 -eq 2 ]] + wait_pod some-name-rs0-1 + local pod=some-name-rs0-1 + set +o xtrace waiting for pod/some-name-rs0-1 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.7rPuNqTKgX +++ mktemp ++ local LAST_ERR=/tmp/tmp.Q95jZvsPiv ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.7rPuNqTKgX ++ cat /tmp/tmp.Q95jZvsPiv ++ rm /tmp/tmp.7rPuNqTKgX /tmp/tmp.Q95jZvsPiv ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-rs0-2 + local pod=some-name-rs0-2 + set +o xtrace waiting for pod/some-name-rs0-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 
'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.YkJ7IucPSD +++ mktemp ++ local LAST_ERR=/tmp/tmp.l19T6LC044 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.YkJ7IucPSD ++ cat /tmp/tmp.l19T6LC044 ++ rm /tmp/tmp.YkJ7IucPSD /tmp/tmp.l19T6LC044 ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.yWWOYXC1p8 +++ mktemp ++ local LAST_ERR=/tmp/tmp.cUBLqr9NiQ ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.yWWOYXC1p8 ++ cat /tmp/tmp.cUBLqr9NiQ ++ rm /tmp/tmp.yWWOYXC1p8 /tmp/tmp.cUBLqr9NiQ ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness + wait_for_running some-name-cfg 3 false + local name=some-name-cfg + let last_pod=2 + local check_cluster_readyness=false + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=cfg + local cluster_name=some-name ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + [[ 0 -eq 2 ]] + wait_pod some-name-cfg-0 + local pod=some-name-cfg-0 + set +o xtrace waiting for pod/some-name-cfg-0 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 1 -eq 2 ]] + wait_pod some-name-cfg-1 + local pod=some-name-cfg-1 + set +o xtrace waiting for pod/some-name-cfg-1 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Cy6w6oJhws +++ mktemp ++ local LAST_ERR=/tmp/tmp.MgpdOio08w ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Cy6w6oJhws ++ cat /tmp/tmp.MgpdOio08w ++ rm /tmp/tmp.Cy6w6oJhws /tmp/tmp.MgpdOio08w ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-cfg-2 + local pod=some-name-cfg-2 + set +o xtrace waiting for pod/some-name-cfg-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].non_voting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Rwg1cI0YaG +++ mktemp ++ local LAST_ERR=/tmp/tmp.yFUjz0nkYX ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].non_voting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Rwg1cI0YaG ++ cat /tmp/tmp.yFUjz0nkYX ++ rm /tmp/tmp.Rwg1cI0YaG /tmp/tmp.yFUjz0nkYX ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.62xwVG4GZo +++ mktemp ++ local LAST_ERR=/tmp/tmp.Z99dmUT5o8 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat 
/tmp/tmp.62xwVG4GZo ++ cat /tmp/tmp.Z99dmUT5o8 ++ rm /tmp/tmp.62xwVG4GZo /tmp/tmp.Z99dmUT5o8 ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ false == \t\r\u\e ]] + wait_for_running some-name-mongos 3 + local name=some-name-mongos + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=mongos + local cluster_name=some-name ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + [[ 0 -eq 2 ]] + wait_pod some-name-mongos-0 + local pod=some-name-mongos-0 + set +o xtrace waiting for pod/some-name-mongos-0 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 1 -eq 2 ]] + wait_pod some-name-mongos-1 + local pod=some-name-mongos-1 + set +o xtrace waiting for pod/some-name-mongos-1 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.mufpDNKzsW +++ mktemp ++ local LAST_ERR=/tmp/tmp.L63iuGk8R8 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.mufpDNKzsW ++ cat /tmp/tmp.L63iuGk8R8 ++ rm /tmp/tmp.mufpDNKzsW /tmp/tmp.L63iuGk8R8 ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-mongos-2 + local pod=some-name-mongos-2 + set +o xtrace waiting for pod/some-name-mongos-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].non_voting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.NSqb3V8ji6 +++ mktemp ++ local LAST_ERR=/tmp/tmp.KKp9X2ZoEe ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].non_voting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.NSqb3V8ji6 ++ cat /tmp/tmp.KKp9X2ZoEe ++ rm /tmp/tmp.NSqb3V8ji6 /tmp/tmp.KKp9X2ZoEe ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.hse6dxfkc0 +++ mktemp ++ local LAST_ERR=/tmp/tmp.v1d9m0arYk ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.hse6dxfkc0 ++ cat /tmp/tmp.v1d9m0arYk ++ rm /tmp/tmp.hse6dxfkc0 /tmp/tmp.v1d9m0arYk ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness + wait_cluster_consistency some-name + local cluster_name=some-name + local wait_time=32 + retry=0 + sleep 7 + echo -n 'waiting for cluster readyness' waiting for cluster readyness++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.AfceUxXUJE +++ mktemp ++ local LAST_ERR=/tmp/tmp.x7qyOQ9aro ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.AfceUxXUJE ++ cat /tmp/tmp.x7qyOQ9aro ++ rm /tmp/tmp.AfceUxXUJE /tmp/tmp.x7qyOQ9aro ++ return 0 + [[ ready == \r\e\a\d\y ]] + echo + run_mongos 'use myApp\n db.test.insert({ x: 100501 })' 
myApp:myPass@some-name-mongos.expose-sharded-3967 + local 'command=use myApp\n db.test.insert({ x: 100501 })' + local uri=myApp:myPass@some-name-mongos.expose-sharded-3967 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local port=27017 + local mongo_bin=mongo ++ echo .svc.cluster.local ++ awk -F: '{print $2}' + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.dOcllG4sl3 +++ mktemp ++ local LAST_ERR=/tmp/tmp.JnAacHQW9J ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.dOcllG4sl3 ++ cat /tmp/tmp.JnAacHQW9J ++ rm /tmp/tmp.dOcllG4sl3 /tmp/tmp.JnAacHQW9J ++ return 0 + local client_container=psmdb-client-66f577db5f-bpv5m + kubectl_bin exec psmdb-client-66f577db5f-bpv5m -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100501 })\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.expose-sharded-3967.svc.cluster.local:27017/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.i0E0BBSr2b ++ mktemp + local LAST_ERR=/tmp/tmp.pnWZ84G43P + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-66f577db5f-bpv5m -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100501 })\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.expose-sharded-3967.svc.cluster.local:27017/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.i0E0BBSr2b Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-mongos.expose-sharded-3967.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb Implicit session: session { "id" : UUID("14bb0042-beca-4b01-ba15-6cd997b1928d") } Percona Server for MongoDB server version: v7.0.18-11 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.pnWZ84G43P + rm /tmp/tmp.i0E0BBSr2b /tmp/tmp.pnWZ84G43P + return 0 + compare_mongos_cmd find myApp:myPass@some-name-mongos.expose-sharded-3967 -2nd + local command=find + local uri=myApp:myPass@some-name-mongos.expose-sharded-3967 + local postfix=-2nd + local suffix= + local database=myApp + local collection=test + local port=27017 + egrep -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' + run_mongos 'use myApp\n db.test.find()' myApp:myPass@some-name-mongos.expose-sharded-3967 mongodb '' '' 27017 + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-mongos.expose-sharded-3967 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local port=27017 + local mongo_bin=mongo ++ awk -F: '{print $2}' ++ echo .svc.cluster.local + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.693ePsbyOZ +++ mktemp ++ local LAST_ERR=/tmp/tmp.8o5uWTJEE0 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 
'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.693ePsbyOZ ++ cat /tmp/tmp.8o5uWTJEE0 ++ rm /tmp/tmp.693ePsbyOZ /tmp/tmp.8o5uWTJEE0 ++ return 0 + local client_container=psmdb-client-66f577db5f-bpv5m + kubectl_bin exec psmdb-client-66f577db5f-bpv5m -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.expose-sharded-3967.svc.cluster.local:27017/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.zgWqpNetDj ++ mktemp + local LAST_ERR=/tmp/tmp.ZzPXK89RYa + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-66f577db5f-bpv5m -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.expose-sharded-3967.svc.cluster.local:27017/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.zgWqpNetDj + cat /tmp/tmp.ZzPXK89RYa + rm /tmp/tmp.zgWqpNetDj /tmp/tmp.ZzPXK89RYa + return 0 + [[ 0 -eq 0 ]] + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1965/e2e-tests/expose-sharded/compare/find-2nd.json /tmp/tmp.TQYCeunbst/find-2nd + compare_mongo_cmd find myApp:myPass@some-name-rs0-0.some-name-rs0.expose-sharded-3967 -2nd + local command=find + local uri=myApp:myPass@some-name-rs0-0.some-name-rs0.expose-sharded-3967 + local postfix=-2nd + local suffix= + local database=myApp + local collection=test + local sort= + local 'full_command=db.test.find()' + [[ ! -z '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-06-09T03:00:14+0000] running db.test.find() in myApp + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-0.some-name-rs0.expose-sharded-3967 mongodb '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-0.some-name-rs0.expose-sharded-3967 + local driver=mongodb + local suffix=.svc.cluster.local + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.aoZiWNPvOW +++ mktemp ++ local LAST_ERR=/tmp/tmp.IftUemxJQ6 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.aoZiWNPvOW ++ cat /tmp/tmp.IftUemxJQ6 ++ rm /tmp/tmp.aoZiWNPvOW /tmp/tmp.IftUemxJQ6 ++ return 0 + local client_container=psmdb-client-66f577db5f-bpv5m + local mongo_flag= + [[ myApp:myPass@some-name-rs0-0.some-name-rs0.expose-sharded-3967 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-66f577db5f-bpv5m -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-0.some-name-rs0.expose-sharded-3967.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.hjHSgkXZlY ++ mktemp + local LAST_ERR=/tmp/tmp.McAsKCWGA2 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-66f577db5f-bpv5m -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo 
mongodb://myApp:myPass@some-name-rs0-0.some-name-rs0.expose-sharded-3967.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.hjHSgkXZlY + cat /tmp/tmp.McAsKCWGA2 + rm /tmp/tmp.hjHSgkXZlY /tmp/tmp.McAsKCWGA2 + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1965/e2e-tests/expose-sharded/compare/find-2nd.json /tmp/tmp.TQYCeunbst/find-2nd + compare_mongo_cmd find myApp:myPass@some-name-rs0-1.some-name-rs0.expose-sharded-3967 -2nd + local command=find + local uri=myApp:myPass@some-name-rs0-1.some-name-rs0.expose-sharded-3967 + local postfix=-2nd + local suffix= + local database=myApp + local collection=test + local sort= + local 'full_command=db.test.find()' + [[ ! -z '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-06-09T03:00:16+0000] running db.test.find() in myApp + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-1.some-name-rs0.expose-sharded-3967 mongodb '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-1.some-name-rs0.expose-sharded-3967 + local driver=mongodb + local suffix=.svc.cluster.local + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.m918ZUDEVM +++ mktemp ++ local LAST_ERR=/tmp/tmp.64ZrFhzPY8 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.m918ZUDEVM ++ cat /tmp/tmp.64ZrFhzPY8 ++ rm /tmp/tmp.m918ZUDEVM /tmp/tmp.64ZrFhzPY8 ++ return 0 + local client_container=psmdb-client-66f577db5f-bpv5m + local mongo_flag= + [[ myApp:myPass@some-name-rs0-1.some-name-rs0.expose-sharded-3967 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-66f577db5f-bpv5m -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-1.some-name-rs0.expose-sharded-3967.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.HnomMk1WGJ ++ mktemp + local LAST_ERR=/tmp/tmp.JlmPMEiqr7 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-66f577db5f-bpv5m -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-1.some-name-rs0.expose-sharded-3967.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.HnomMk1WGJ + cat /tmp/tmp.JlmPMEiqr7 + rm /tmp/tmp.HnomMk1WGJ /tmp/tmp.JlmPMEiqr7 + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1965/e2e-tests/expose-sharded/compare/find-2nd.json /tmp/tmp.TQYCeunbst/find-2nd + compare_mongo_cmd find myApp:myPass@some-name-rs0-2.some-name-rs0.expose-sharded-3967 -2nd + local command=find + local uri=myApp:myPass@some-name-rs0-2.some-name-rs0.expose-sharded-3967 + local postfix=-2nd + local suffix= + local database=myApp + local collection=test + local sort= + local 'full_command=db.test.find()' + [[ ! 
-z '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-06-09T03:00:19+0000] running db.test.find() in myApp + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-2.some-name-rs0.expose-sharded-3967 mongodb '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-2.some-name-rs0.expose-sharded-3967 + local driver=mongodb + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ local LAST_OUT=/tmp/tmp.HRonJlZXyU +++ mktemp ++ local LAST_ERR=/tmp/tmp.aUgjAKfW0Z ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.HRonJlZXyU ++ cat /tmp/tmp.aUgjAKfW0Z ++ rm /tmp/tmp.HRonJlZXyU /tmp/tmp.aUgjAKfW0Z ++ return 0 + local client_container=psmdb-client-66f577db5f-bpv5m + local mongo_flag= + [[ myApp:myPass@some-name-rs0-2.some-name-rs0.expose-sharded-3967 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-66f577db5f-bpv5m -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-2.some-name-rs0.expose-sharded-3967.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.LzJKGI5Eso ++ mktemp + local LAST_ERR=/tmp/tmp.v5pSxQZiye + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-66f577db5f-bpv5m -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-2.some-name-rs0.expose-sharded-3967.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.LzJKGI5Eso + cat /tmp/tmp.v5pSxQZiye + rm /tmp/tmp.LzJKGI5Eso /tmp/tmp.v5pSxQZiye + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1965/e2e-tests/expose-sharded/compare/find-2nd.json /tmp/tmp.TQYCeunbst/find-2nd + compare_mongo_config some-name expose-sharded-3967 + cluster=some-name + namespace=expose-sharded-3967 + enable_expose=true + desc 'Compare mongo config' + set +o xtrace ----------------------------------------------------------------------------------- Compare mongo config ----------------------------------------------------------------------------------- + cfg_0_endpoint=some-name-cfg-0.some-name-cfg.expose-sharded-3967.svc.cluster.local ++ run_mongo 'var host;var x=0;rs.conf().members.forEach(function(d){ if(d.tags.podName=="some-name-cfg-0"){ host=rs.conf().members[x].host;print(host)};x=x+1; })' clusterAdmin:clusterAdmin123456@some-name-cfg.expose-sharded-3967 ++ local 'command=var host;var x=0;rs.conf().members.forEach(function(d){ if(d.tags.podName=="some-name-cfg-0"){ host=rs.conf().members[x].host;print(host)};x=x+1; })' ++ local uri=clusterAdmin:clusterAdmin123456@some-name-cfg.expose-sharded-3967 ++ local driver=mongodb+srv ++ egrep -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|bye' ++ local 
suffix=.svc.cluster.local +++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.Ir5stjAE6m ++++ mktemp +++ local LAST_ERR=/tmp/tmp.IiRAB3zTcZ +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.Ir5stjAE6m +++ cat /tmp/tmp.IiRAB3zTcZ +++ rm /tmp/tmp.Ir5stjAE6m /tmp/tmp.IiRAB3zTcZ +++ return 0 ++ local client_container=psmdb-client-66f577db5f-bpv5m ++ local mongo_flag= ++ [[ clusterAdmin:clusterAdmin123456@some-name-cfg.expose-sharded-3967 == *cfg* ]] ++ replica_set=cfg ++ kubectl_bin exec psmdb-client-66f577db5f-bpv5m -- bash -c 'printf '\''var host;var x=0;rs.conf().members.forEach(function(d){ if(d.tags.podName=="some-name-cfg-0"){ host=rs.conf().members[x].host;print(host)};x=x+1; })\n'\'' | mongo mongodb+srv://clusterAdmin:clusterAdmin123456@some-name-cfg.expose-sharded-3967.svc.cluster.local/admin?ssl=false\&replicaSet=cfg ' +++ mktemp ++ local LAST_OUT=/tmp/tmp.l6v5URBMIm +++ mktemp ++ local LAST_ERR=/tmp/tmp.iCMbkmogNA ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl exec psmdb-client-66f577db5f-bpv5m -- bash -c 'printf '\''var host;var x=0;rs.conf().members.forEach(function(d){ if(d.tags.podName=="some-name-cfg-0"){ host=rs.conf().members[x].host;print(host)};x=x+1; })\n'\'' | mongo mongodb+srv://clusterAdmin:clusterAdmin123456@some-name-cfg.expose-sharded-3967.svc.cluster.local/admin?ssl=false\&replicaSet=cfg ' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.l6v5URBMIm ++ cat /tmp/tmp.iCMbkmogNA ++ rm /tmp/tmp.l6v5URBMIm /tmp/tmp.iCMbkmogNA ++ return 0 + cfg_0_endpoint_actual=some-name-cfg-0.some-name-cfg.expose-sharded-3967.svc.cluster.local:27017 + rs0_0_endpoint=some-name-rs0-0.some-name-rs0.expose-sharded-3967.svc.cluster.local ++ egrep -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|bye' ++ run_mongo 'var host;var x=0;rs.conf().members.forEach(function(d){ if(d.tags.podName=="some-name-rs0-0"){ host=rs.conf().members[x].host;print(host)};x=x+1; })' clusterAdmin:clusterAdmin123456@some-name-rs0.expose-sharded-3967 ++ local 'command=var host;var x=0;rs.conf().members.forEach(function(d){ if(d.tags.podName=="some-name-rs0-0"){ host=rs.conf().members[x].host;print(host)};x=x+1; })' ++ local uri=clusterAdmin:clusterAdmin123456@some-name-rs0.expose-sharded-3967 ++ local driver=mongodb+srv ++ local suffix=.svc.cluster.local +++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.7j2v9PopMF ++++ mktemp +++ local LAST_ERR=/tmp/tmp.WrTYiyccoS +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.7j2v9PopMF +++ cat /tmp/tmp.WrTYiyccoS +++ rm /tmp/tmp.7j2v9PopMF /tmp/tmp.WrTYiyccoS +++ return 0 ++ local client_container=psmdb-client-66f577db5f-bpv5m ++ local mongo_flag= ++ [[ clusterAdmin:clusterAdmin123456@some-name-rs0.expose-sharded-3967 == *cfg* ]] ++ replica_set=rs0 ++ kubectl_bin exec 
psmdb-client-66f577db5f-bpv5m -- bash -c 'printf '\''var host;var x=0;rs.conf().members.forEach(function(d){ if(d.tags.podName=="some-name-rs0-0"){ host=rs.conf().members[x].host;print(host)};x=x+1; })\n'\'' | mongo mongodb+srv://clusterAdmin:clusterAdmin123456@some-name-rs0.expose-sharded-3967.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' +++ mktemp ++ local LAST_OUT=/tmp/tmp.3tYbcwkDXP +++ mktemp ++ local LAST_ERR=/tmp/tmp.OqBytwsuEE ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl exec psmdb-client-66f577db5f-bpv5m -- bash -c 'printf '\''var host;var x=0;rs.conf().members.forEach(function(d){ if(d.tags.podName=="some-name-rs0-0"){ host=rs.conf().members[x].host;print(host)};x=x+1; })\n'\'' | mongo mongodb+srv://clusterAdmin:clusterAdmin123456@some-name-rs0.expose-sharded-3967.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.3tYbcwkDXP ++ cat /tmp/tmp.OqBytwsuEE ++ rm /tmp/tmp.3tYbcwkDXP /tmp/tmp.OqBytwsuEE ++ return 0 + rs0_0_endpoint_actual=some-name-rs0-0.some-name-rs0.expose-sharded-3967.svc.cluster.local:27017 + [[ some-name-rs0-0.some-name-rs0.expose-sharded-3967.svc.cluster.local:27017 != \s\o\m\e\-\n\a\m\e\-\r\s\0\-\0\.\s\o\m\e\-\n\a\m\e\-\r\s\0\.\e\x\p\o\s\e\-\s\h\a\r\d\e\d\-\3\9\6\7\.\s\v\c\.\c\l\u\s\t\e\r\.\l\o\c\a\l\:\2\7\0\1\7 ]] + [[ some-name-cfg-0.some-name-cfg.expose-sharded-3967.svc.cluster.local:27017 != \s\o\m\e\-\n\a\m\e\-\c\f\g\-\0\.\s\o\m\e\-\n\a\m\e\-\c\f\g\.\e\x\p\o\s\e\-\s\h\a\r\d\e\d\-\3\9\6\7\.\s\v\c\.\c\l\u\s\t\e\r\.\l\o\c\a\l\:\2\7\0\1\7 ]] + desc 'Exposed, ClusterIP -> LoadBalancer' + set +o xtrace ----------------------------------------------------------------------------------- Exposed, ClusterIP -> LoadBalancer ----------------------------------------------------------------------------------- + expose_cluster LoadBalancer + expose_type=LoadBalancer + expose_status=true + kubectl_bin patch psmdb some-name --type=json --patch '[ { "op": "replace", "path": "/spec/replsets/0/expose", "value": { "enabled": true, "type" : "LoadBalancer" } }, { "op": "replace", "path": "/spec/sharding/mongos/expose", "value": { "type" : "LoadBalancer" } }, { "op": "replace", "path": "/spec/sharding/configsvrReplSet/expose", "value": { "enabled": true, "type" : "LoadBalancer" } }]' ++ mktemp + local LAST_OUT=/tmp/tmp.ddY571HGBe ++ mktemp + local LAST_ERR=/tmp/tmp.blfE80oqyy + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl patch psmdb some-name --type=json --patch '[ { "op": "replace", "path": "/spec/replsets/0/expose", "value": { "enabled": true, "type" : "LoadBalancer" } }, { "op": "replace", "path": "/spec/sharding/mongos/expose", "value": { "type" : "LoadBalancer" } }, { "op": "replace", "path": "/spec/sharding/configsvrReplSet/expose", "value": { "enabled": true, "type" : "LoadBalancer" } }]' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.ddY571HGBe perconaservermongodb.psmdb.percona.com/some-name patched + cat /tmp/tmp.blfE80oqyy + rm /tmp/tmp.ddY571HGBe /tmp/tmp.blfE80oqyy + return 0 + wait_for_running some-name-rs0 3 + local name=some-name-rs0 + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=some-name ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + [[ 0 -eq 2 ]] + wait_pod some-name-rs0-0 + local pod=some-name-rs0-0 + set +o xtrace waiting for pod/some-name-rs0-0 to be 
ready.OK + for i in '$(seq 0 $last_pod)' + [[ 1 -eq 2 ]] + wait_pod some-name-rs0-1 + local pod=some-name-rs0-1 + set +o xtrace waiting for pod/some-name-rs0-1 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.2ykdkaQb1y +++ mktemp ++ local LAST_ERR=/tmp/tmp.9ydKbYKqis ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.2ykdkaQb1y ++ cat /tmp/tmp.9ydKbYKqis ++ rm /tmp/tmp.2ykdkaQb1y /tmp/tmp.9ydKbYKqis ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-rs0-2 + local pod=some-name-rs0-2 + set +o xtrace waiting for pod/some-name-rs0-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.GiLZkh7J4F +++ mktemp ++ local LAST_ERR=/tmp/tmp.c71QCnGXdH ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.GiLZkh7J4F ++ cat /tmp/tmp.c71QCnGXdH ++ rm /tmp/tmp.GiLZkh7J4F /tmp/tmp.c71QCnGXdH ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.6LMOUzECbX +++ mktemp ++ local LAST_ERR=/tmp/tmp.dClE34UL7b ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.6LMOUzECbX ++ cat /tmp/tmp.dClE34UL7b ++ rm /tmp/tmp.6LMOUzECbX /tmp/tmp.dClE34UL7b ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness + wait_for_running some-name-cfg 3 false + local name=some-name-cfg + let last_pod=2 + local check_cluster_readyness=false + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=cfg + local cluster_name=some-name ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + [[ 0 -eq 2 ]] + wait_pod some-name-cfg-0 + local pod=some-name-cfg-0 + set +o xtrace waiting for pod/some-name-cfg-0 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 1 -eq 2 ]] + wait_pod some-name-cfg-1 + local pod=some-name-cfg-1 + set +o xtrace waiting for pod/some-name-cfg-1 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.O5bIMjoOTU +++ mktemp ++ local LAST_ERR=/tmp/tmp.HHm84PSt0S ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.O5bIMjoOTU ++ cat /tmp/tmp.HHm84PSt0S ++ rm /tmp/tmp.O5bIMjoOTU /tmp/tmp.HHm84PSt0S ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-cfg-2 + local pod=some-name-cfg-2 + set +o xtrace waiting for pod/some-name-cfg-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 
'jsonpath={.spec.replsets[?(@.name=="cfg")].non_voting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Y0qoR2Xtup +++ mktemp ++ local LAST_ERR=/tmp/tmp.g8buohki1g ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].non_voting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Y0qoR2Xtup ++ cat /tmp/tmp.g8buohki1g ++ rm /tmp/tmp.Y0qoR2Xtup /tmp/tmp.g8buohki1g ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.FuTsehWLmQ +++ mktemp ++ local LAST_ERR=/tmp/tmp.AN7ieFncoy ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.FuTsehWLmQ ++ cat /tmp/tmp.AN7ieFncoy ++ rm /tmp/tmp.FuTsehWLmQ /tmp/tmp.AN7ieFncoy ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ false == \t\r\u\e ]] + wait_for_running some-name-mongos 3 + local name=some-name-mongos + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=mongos + local cluster_name=some-name ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + [[ 0 -eq 2 ]] + wait_pod some-name-mongos-0 + local pod=some-name-mongos-0 + set +o xtrace waiting for pod/some-name-mongos-0 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 1 -eq 2 ]] + wait_pod some-name-mongos-1 + local pod=some-name-mongos-1 + set +o xtrace waiting for pod/some-name-mongos-1 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.YSJkAHSJa4 +++ mktemp ++ local LAST_ERR=/tmp/tmp.DoTMboHRMK ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.YSJkAHSJa4 ++ cat /tmp/tmp.DoTMboHRMK ++ rm /tmp/tmp.YSJkAHSJa4 /tmp/tmp.DoTMboHRMK ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-mongos-2 + local pod=some-name-mongos-2 + set +o xtrace waiting for pod/some-name-mongos-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].non_voting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.pH0d5WHbPP +++ mktemp ++ local LAST_ERR=/tmp/tmp.DSXmBjHROK ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].non_voting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.pH0d5WHbPP ++ cat /tmp/tmp.DSXmBjHROK ++ rm /tmp/tmp.pH0d5WHbPP /tmp/tmp.DSXmBjHROK ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.J3Rqdu8nVx +++ mktemp ++ local LAST_ERR=/tmp/tmp.oQl1rYvyNW ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break 
++ cat /tmp/tmp.J3Rqdu8nVx ++ cat /tmp/tmp.oQl1rYvyNW ++ rm /tmp/tmp.J3Rqdu8nVx /tmp/tmp.oQl1rYvyNW ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness + wait_cluster_consistency some-name + local cluster_name=some-name + local wait_time=32 + retry=0 + sleep 7 + echo -n 'waiting for cluster readyness' waiting for cluster readyness++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.K7NtObKdXx +++ mktemp ++ local LAST_ERR=/tmp/tmp.cOHIpPpdSt ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.K7NtObKdXx ++ cat /tmp/tmp.cOHIpPpdSt ++ rm /tmp/tmp.K7NtObKdXx /tmp/tmp.cOHIpPpdSt ++ return 0 + [[ ready == \r\e\a\d\y ]] + echo + run_mongos 'use myApp\n db.test.insert({ x: 100502 })' myApp:myPass@some-name-mongos.expose-sharded-3967 + local 'command=use myApp\n db.test.insert({ x: 100502 })' + local uri=myApp:myPass@some-name-mongos.expose-sharded-3967 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local port=27017 + local mongo_bin=mongo ++ echo .svc.cluster.local ++ awk -F: '{print $2}' + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.SMLWebExzK +++ mktemp ++ local LAST_ERR=/tmp/tmp.k0BR5cK3LL ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.SMLWebExzK ++ cat /tmp/tmp.k0BR5cK3LL ++ rm /tmp/tmp.SMLWebExzK /tmp/tmp.k0BR5cK3LL ++ return 0 + local client_container=psmdb-client-66f577db5f-bpv5m + kubectl_bin exec psmdb-client-66f577db5f-bpv5m -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100502 })\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.expose-sharded-3967.svc.cluster.local:27017/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.v6H5gQxeoO ++ mktemp + local LAST_ERR=/tmp/tmp.OL0jjfiBi6 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-66f577db5f-bpv5m -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100502 })\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.expose-sharded-3967.svc.cluster.local:27017/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.v6H5gQxeoO Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-mongos.expose-sharded-3967.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb Implicit session: session { "id" : UUID("26c1fe21-501c-479d-bf55-eb2ba8fe2aaf") } Percona Server for MongoDB server version: v7.0.18-11 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.OL0jjfiBi6 + rm /tmp/tmp.v6H5gQxeoO /tmp/tmp.OL0jjfiBi6 + return 0 + compare_mongos_cmd find myApp:myPass@some-name-mongos.expose-sharded-3967 -3nd + local command=find + local uri=myApp:myPass@some-name-mongos.expose-sharded-3967 + local postfix=-3nd + local suffix= + local database=myApp + local collection=test + local port=27017 + run_mongos 'use myApp\n db.test.find()' myApp:myPass@some-name-mongos.expose-sharded-3967 
mongodb '' '' 27017 + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-mongos.expose-sharded-3967 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local port=27017 + local mongo_bin=mongo + egrep -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ echo .svc.cluster.local ++ awk -F: '{print $2}' + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.nRwxQv7YoY +++ mktemp ++ local LAST_ERR=/tmp/tmp.v9Sxc25cUT ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.nRwxQv7YoY ++ cat /tmp/tmp.v9Sxc25cUT ++ rm /tmp/tmp.nRwxQv7YoY /tmp/tmp.v9Sxc25cUT ++ return 0 + local client_container=psmdb-client-66f577db5f-bpv5m + kubectl_bin exec psmdb-client-66f577db5f-bpv5m -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.expose-sharded-3967.svc.cluster.local:27017/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.GuRVY4sIKb ++ mktemp + local LAST_ERR=/tmp/tmp.05Q2l8pz7b + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-66f577db5f-bpv5m -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.expose-sharded-3967.svc.cluster.local:27017/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.GuRVY4sIKb + cat /tmp/tmp.05Q2l8pz7b + rm /tmp/tmp.GuRVY4sIKb /tmp/tmp.05Q2l8pz7b + return 0 + [[ 0 -eq 0 ]] + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1965/e2e-tests/expose-sharded/compare/find-3nd.json /tmp/tmp.TQYCeunbst/find-3nd + compare_mongo_cmd find myApp:myPass@some-name-rs0-0.some-name-rs0.expose-sharded-3967 -3nd + local command=find + local uri=myApp:myPass@some-name-rs0-0.some-name-rs0.expose-sharded-3967 + local postfix=-3nd + local suffix= + local database=myApp + local collection=test + local sort= + local 'full_command=db.test.find()' + [[ ! 
-z '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-06-09T03:01:33+0000] running db.test.find() in myApp + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-0.some-name-rs0.expose-sharded-3967 mongodb '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-0.some-name-rs0.expose-sharded-3967 + local driver=mongodb + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' +++ mktemp ++ local LAST_OUT=/tmp/tmp.kDAQhYPvIj +++ mktemp ++ local LAST_ERR=/tmp/tmp.fWZme7rsnt ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.kDAQhYPvIj ++ cat /tmp/tmp.fWZme7rsnt ++ rm /tmp/tmp.kDAQhYPvIj /tmp/tmp.fWZme7rsnt ++ return 0 + local client_container=psmdb-client-66f577db5f-bpv5m + local mongo_flag= + [[ myApp:myPass@some-name-rs0-0.some-name-rs0.expose-sharded-3967 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-66f577db5f-bpv5m -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-0.some-name-rs0.expose-sharded-3967.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.SqdupYuezJ ++ mktemp + local LAST_ERR=/tmp/tmp.CnEunMqYfz + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-66f577db5f-bpv5m -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-0.some-name-rs0.expose-sharded-3967.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.SqdupYuezJ + cat /tmp/tmp.CnEunMqYfz + rm /tmp/tmp.SqdupYuezJ /tmp/tmp.CnEunMqYfz + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1965/e2e-tests/expose-sharded/compare/find-3nd.json /tmp/tmp.TQYCeunbst/find-3nd + compare_mongo_cmd find myApp:myPass@some-name-rs0-1.some-name-rs0.expose-sharded-3967 -3nd + local command=find + local uri=myApp:myPass@some-name-rs0-1.some-name-rs0.expose-sharded-3967 + local postfix=-3nd + local suffix= + local database=myApp + local collection=test + local sort= + local 'full_command=db.test.find()' + [[ ! 
-z '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-06-09T03:01:36+0000] running db.test.find() in myApp + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-1.some-name-rs0.expose-sharded-3967 mongodb '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-1.some-name-rs0.expose-sharded-3967 + local driver=mongodb + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Mg9iGpUqUd +++ mktemp ++ local LAST_ERR=/tmp/tmp.h1tNJHw5P8 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Mg9iGpUqUd ++ cat /tmp/tmp.h1tNJHw5P8 ++ rm /tmp/tmp.Mg9iGpUqUd /tmp/tmp.h1tNJHw5P8 ++ return 0 + local client_container=psmdb-client-66f577db5f-bpv5m + local mongo_flag= + [[ myApp:myPass@some-name-rs0-1.some-name-rs0.expose-sharded-3967 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-66f577db5f-bpv5m -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-1.some-name-rs0.expose-sharded-3967.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.SQfNPylMj3 ++ mktemp + local LAST_ERR=/tmp/tmp.3SZdyurNnK + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-66f577db5f-bpv5m -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-1.some-name-rs0.expose-sharded-3967.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.SQfNPylMj3 + cat /tmp/tmp.3SZdyurNnK + rm /tmp/tmp.SQfNPylMj3 /tmp/tmp.3SZdyurNnK + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1965/e2e-tests/expose-sharded/compare/find-3nd.json /tmp/tmp.TQYCeunbst/find-3nd + compare_mongo_cmd find myApp:myPass@some-name-rs0-2.some-name-rs0.expose-sharded-3967 -3nd + local command=find + local uri=myApp:myPass@some-name-rs0-2.some-name-rs0.expose-sharded-3967 + local postfix=-3nd + local suffix= + local database=myApp + local collection=test + local sort= + local 'full_command=db.test.find()' + [[ ! 
-z '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-06-09T03:01:40+0000] running db.test.find() in myApp + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-2.some-name-rs0.expose-sharded-3967 mongodb '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-2.some-name-rs0.expose-sharded-3967 + local driver=mongodb + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.F8wRigUzRJ +++ mktemp + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ local LAST_ERR=/tmp/tmp.4SyTay70FF ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.F8wRigUzRJ ++ cat /tmp/tmp.4SyTay70FF ++ rm /tmp/tmp.F8wRigUzRJ /tmp/tmp.4SyTay70FF ++ return 0 + local client_container=psmdb-client-66f577db5f-bpv5m + local mongo_flag= + [[ myApp:myPass@some-name-rs0-2.some-name-rs0.expose-sharded-3967 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-66f577db5f-bpv5m -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-2.some-name-rs0.expose-sharded-3967.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.w3YhnHj07i ++ mktemp + local LAST_ERR=/tmp/tmp.uyJ6NExKoL + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-66f577db5f-bpv5m -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-2.some-name-rs0.expose-sharded-3967.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.w3YhnHj07i + cat /tmp/tmp.uyJ6NExKoL + rm /tmp/tmp.w3YhnHj07i /tmp/tmp.uyJ6NExKoL + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1965/e2e-tests/expose-sharded/compare/find-3nd.json /tmp/tmp.TQYCeunbst/find-3nd + sleep 60 + desc 'Pause Exposed cluster (LoadBalancer)' + set +o xtrace ----------------------------------------------------------------------------------- Pause Exposed cluster (LoadBalancer) ----------------------------------------------------------------------------------- + stop_cluster some-name + local cluster_name=some-name + local max_wait_time=120 + local passed_time=0 + local sleep_time=1 + kubectl_bin patch psmdb some-name --type json '-p=[{"op":"add","path":"/spec/pause","value":true}]' ++ mktemp + local LAST_OUT=/tmp/tmp.msUo6Ne4tR ++ mktemp + local LAST_ERR=/tmp/tmp.Tx9kluH09Y + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl patch psmdb some-name --type json '-p=[{"op":"add","path":"/spec/pause","value":true}]' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.msUo6Ne4tR perconaservermongodb.psmdb.percona.com/some-name patched + cat /tmp/tmp.Tx9kluH09Y + rm /tmp/tmp.msUo6Ne4tR /tmp/tmp.Tx9kluH09Y + return 0 + set +x Waiting for cluster stop...............Error from server (NotFound): deployments.apps "some-name-mongos" not found Error from server (NotFound): 
deployments.apps "some-name-mongos" not found Error from server (NotFound): deployments.apps "some-name-mongos" not found Error from server (NotFound): deployments.apps "some-name-mongos" not found .Error from server (NotFound): deployments.apps "some-name-mongos" not found Error from server (NotFound): deployments.apps "some-name-mongos" not found Error from server (NotFound): deployments.apps "some-name-mongos" not found Error from server (NotFound): deployments.apps "some-name-mongos" not found .Error from server (NotFound): deployments.apps "some-name-mongos" not found Error from server (NotFound): deployments.apps "some-name-mongos" not found Error from server (NotFound): deployments.apps "some-name-mongos" not found Error from server (NotFound): deployments.apps "some-name-mongos" not found .Error from server (NotFound): deployments.apps "some-name-mongos" not found Error from server (NotFound): deployments.apps "some-name-mongos" not found Error from server (NotFound): deployments.apps "some-name-mongos" not found Error from server (NotFound): deployments.apps "some-name-mongos" not found .Error from server (NotFound): deployments.apps "some-name-mongos" not found Error from server (NotFound): deployments.apps "some-name-mongos" not found Error from server (NotFound): deployments.apps "some-name-mongos" not found Error from server (NotFound): deployments.apps "some-name-mongos" not found .Error from server (NotFound): deployments.apps "some-name-mongos" not found Error from server (NotFound): deployments.apps "some-name-mongos" not found Error from server (NotFound): deployments.apps "some-name-mongos" not found Error from server (NotFound): deployments.apps "some-name-mongos" not found .Error from server (NotFound): deployments.apps "some-name-mongos" not found Error from server (NotFound): deployments.apps "some-name-mongos" not found Error from server (NotFound): deployments.apps "some-name-mongos" not found Error from server (NotFound): deployments.apps "some-name-mongos" not found .Error from server (NotFound): deployments.apps "some-name-mongos" not found Error from server (NotFound): deployments.apps "some-name-mongos" not found Error from server (NotFound): deployments.apps "some-name-mongos" not found Error from server (NotFound): deployments.apps "some-name-mongos" not found .Error from server (NotFound): deployments.apps "some-name-mongos" not found Error from server (NotFound): deployments.apps "some-name-mongos" not found Error from server (NotFound): deployments.apps "some-name-mongos" not found Error from server (NotFound): deployments.apps "some-name-mongos" not found .Error from server (NotFound): deployments.apps "some-name-mongos" not found Error from server (NotFound): deployments.apps "some-name-mongos" not found Error from server (NotFound): deployments.apps "some-name-mongos" not found Error from server (NotFound): deployments.apps "some-name-mongos" not found .Error from server (NotFound): deployments.apps "some-name-mongos" not found Error from server (NotFound): deployments.apps "some-name-mongos" not found Error from server (NotFound): deployments.apps "some-name-mongos" not found Error from server (NotFound): deployments.apps "some-name-mongos" not found + start_cluster some-name + local cluster_name=some-name + kubectl_bin patch psmdb some-name --type json '-p=[{"op":"add","path":"/spec/pause","value":false}]' ++ mktemp + local LAST_OUT=/tmp/tmp.yX2q2kzpbO ++ mktemp + local LAST_ERR=/tmp/tmp.suUjoC3hvA + local exit_status=0 + local timeout=4 ++ seq 0 2 + 
for i in '$(seq 0 2)' + set +e + kubectl patch psmdb some-name --type json '-p=[{"op":"add","path":"/spec/pause","value":false}]' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.yX2q2kzpbO perconaservermongodb.psmdb.percona.com/some-name patched + cat /tmp/tmp.suUjoC3hvA + rm /tmp/tmp.yX2q2kzpbO /tmp/tmp.suUjoC3hvA + return 0 + wait_cluster_consistency some-name + local cluster_name=some-name + local wait_time=32 + retry=0 + sleep 7 + echo -n 'waiting for cluster readyness' waiting for cluster readyness++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.qnYdWDz0Qs +++ mktemp ++ local LAST_ERR=/tmp/tmp.buKtBz94m3 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.qnYdWDz0Qs ++ cat /tmp/tmp.buKtBz94m3 ++ rm /tmp/tmp.qnYdWDz0Qs /tmp/tmp.buKtBz94m3 ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 1 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.LJrp2vKlWt +++ mktemp ++ local LAST_ERR=/tmp/tmp.0gjQLgICzj ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.LJrp2vKlWt ++ cat /tmp/tmp.0gjQLgICzj ++ rm /tmp/tmp.LJrp2vKlWt /tmp/tmp.0gjQLgICzj ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 2 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.sZDp0RK91P +++ mktemp ++ local LAST_ERR=/tmp/tmp.w5ONzFmdO5 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.sZDp0RK91P ++ cat /tmp/tmp.w5ONzFmdO5 ++ rm /tmp/tmp.sZDp0RK91P /tmp/tmp.w5ONzFmdO5 ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 3 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.VaJvJrzNRI +++ mktemp ++ local LAST_ERR=/tmp/tmp.Av4fNvrey1 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.VaJvJrzNRI ++ cat /tmp/tmp.Av4fNvrey1 ++ rm /tmp/tmp.VaJvJrzNRI /tmp/tmp.Av4fNvrey1 ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 4 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.b7Tf7cfju1 +++ mktemp ++ local LAST_ERR=/tmp/tmp.Tttyzu4H5o ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.b7Tf7cfju1 ++ cat /tmp/tmp.Tttyzu4H5o ++ rm /tmp/tmp.b7Tf7cfju1 /tmp/tmp.Tttyzu4H5o ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 5 -ge 32 ']' + echo -n . 
.+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.bu7XxNwLW0 +++ mktemp ++ local LAST_ERR=/tmp/tmp.r6FNtA7HmV ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.bu7XxNwLW0 ++ cat /tmp/tmp.r6FNtA7HmV ++ rm /tmp/tmp.bu7XxNwLW0 /tmp/tmp.r6FNtA7HmV ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 6 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.0oXdpxZgWr +++ mktemp ++ local LAST_ERR=/tmp/tmp.QJBNEZxt4e ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.0oXdpxZgWr ++ cat /tmp/tmp.QJBNEZxt4e ++ rm /tmp/tmp.0oXdpxZgWr /tmp/tmp.QJBNEZxt4e ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 7 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.nXOtATdzvc +++ mktemp ++ local LAST_ERR=/tmp/tmp.kgsMWhBdbU ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.nXOtATdzvc ++ cat /tmp/tmp.kgsMWhBdbU ++ rm /tmp/tmp.nXOtATdzvc /tmp/tmp.kgsMWhBdbU ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 8 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.uk6ELrrcL0 +++ mktemp ++ local LAST_ERR=/tmp/tmp.kYUZQDkc1I ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.uk6ELrrcL0 ++ cat /tmp/tmp.kYUZQDkc1I ++ rm /tmp/tmp.uk6ELrrcL0 /tmp/tmp.kYUZQDkc1I ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 9 -ge 32 ']' + echo -n . 
.+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Z78v80RL60 +++ mktemp ++ local LAST_ERR=/tmp/tmp.MAaX3nCkPQ ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Z78v80RL60 ++ cat /tmp/tmp.MAaX3nCkPQ ++ rm /tmp/tmp.Z78v80RL60 /tmp/tmp.MAaX3nCkPQ ++ return 0 + [[ ready == \r\e\a\d\y ]] + echo + run_mongos 'use myApp\n db.test.insert({ x: 100503 })' myApp:myPass@some-name-mongos.expose-sharded-3967 + local 'command=use myApp\n db.test.insert({ x: 100503 })' + local uri=myApp:myPass@some-name-mongos.expose-sharded-3967 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local port=27017 + local mongo_bin=mongo ++ echo .svc.cluster.local ++ awk -F: '{print $2}' + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Bw91tqJxBS +++ mktemp ++ local LAST_ERR=/tmp/tmp.sbsp0JcW5j ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Bw91tqJxBS ++ cat /tmp/tmp.sbsp0JcW5j ++ rm /tmp/tmp.Bw91tqJxBS /tmp/tmp.sbsp0JcW5j ++ return 0 + local client_container=psmdb-client-66f577db5f-bpv5m + kubectl_bin exec psmdb-client-66f577db5f-bpv5m -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100503 })\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.expose-sharded-3967.svc.cluster.local:27017/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.xNX2fTVRzy ++ mktemp + local LAST_ERR=/tmp/tmp.IorMflYm5x + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-66f577db5f-bpv5m -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100503 })\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.expose-sharded-3967.svc.cluster.local:27017/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.xNX2fTVRzy Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-mongos.expose-sharded-3967.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb Implicit session: session { "id" : UUID("3d7ff4c7-c78e-4c1f-a9ec-7c0db7d24476") } Percona Server for MongoDB server version: v7.0.18-11 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.IorMflYm5x + rm /tmp/tmp.xNX2fTVRzy /tmp/tmp.IorMflYm5x + return 0 + compare_mongos_cmd find myApp:myPass@some-name-mongos.expose-sharded-3967 -4nd + local command=find + local uri=myApp:myPass@some-name-mongos.expose-sharded-3967 + local postfix=-4nd + local suffix= + local database=myApp + local collection=test + local port=27017 + run_mongos 'use myApp\n db.test.find()' myApp:myPass@some-name-mongos.expose-sharded-3967 mongodb '' '' 27017 + egrep -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + local 'command=use myApp\n db.test.find()' + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' + local 
uri=myApp:myPass@some-name-mongos.expose-sharded-3967 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local port=27017 + local mongo_bin=mongo ++ awk -F: '{print $2}' ++ echo .svc.cluster.local + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ErtaHRGnnQ +++ mktemp ++ local LAST_ERR=/tmp/tmp.6754MOrHPF ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.ErtaHRGnnQ ++ cat /tmp/tmp.6754MOrHPF ++ rm /tmp/tmp.ErtaHRGnnQ /tmp/tmp.6754MOrHPF ++ return 0 + local client_container=psmdb-client-66f577db5f-bpv5m + kubectl_bin exec psmdb-client-66f577db5f-bpv5m -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.expose-sharded-3967.svc.cluster.local:27017/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.3CeiSRL8Ob ++ mktemp + local LAST_ERR=/tmp/tmp.4tXvwAKPF2 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-66f577db5f-bpv5m -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.expose-sharded-3967.svc.cluster.local:27017/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.3CeiSRL8Ob + cat /tmp/tmp.4tXvwAKPF2 + rm /tmp/tmp.3CeiSRL8Ob /tmp/tmp.4tXvwAKPF2 + return 0 + [[ 0 -eq 0 ]] + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1965/e2e-tests/expose-sharded/compare/find-4nd.json /tmp/tmp.TQYCeunbst/find-4nd + compare_mongo_cmd find myApp:myPass@some-name-rs0-0.some-name-rs0.expose-sharded-3967 -4nd + local command=find + local uri=myApp:myPass@some-name-rs0-0.some-name-rs0.expose-sharded-3967 + local postfix=-4nd + local suffix= + local database=myApp + local collection=test + local sort= + local 'full_command=db.test.find()' + [[ ! 
-z '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-06-09T03:13:59+0000] running db.test.find() in myApp + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-0.some-name-rs0.expose-sharded-3967 mongodb '' + local 'command=use myApp\n db.test.find()' + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + local uri=myApp:myPass@some-name-rs0-0.some-name-rs0.expose-sharded-3967 + local driver=mongodb + local suffix=.svc.cluster.local + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.uWb4zfSPzL +++ mktemp ++ local LAST_ERR=/tmp/tmp.UevvC4rBbu ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.uWb4zfSPzL ++ cat /tmp/tmp.UevvC4rBbu ++ rm /tmp/tmp.uWb4zfSPzL /tmp/tmp.UevvC4rBbu ++ return 0 + local client_container=psmdb-client-66f577db5f-bpv5m + local mongo_flag= + [[ myApp:myPass@some-name-rs0-0.some-name-rs0.expose-sharded-3967 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-66f577db5f-bpv5m -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-0.some-name-rs0.expose-sharded-3967.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.gC4c2oOv4E ++ mktemp + local LAST_ERR=/tmp/tmp.7SsBWsgVQb + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-66f577db5f-bpv5m -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-0.some-name-rs0.expose-sharded-3967.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.gC4c2oOv4E + cat /tmp/tmp.7SsBWsgVQb + rm /tmp/tmp.gC4c2oOv4E /tmp/tmp.7SsBWsgVQb + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1965/e2e-tests/expose-sharded/compare/find-4nd.json /tmp/tmp.TQYCeunbst/find-4nd + compare_mongo_cmd find myApp:myPass@some-name-rs0-1.some-name-rs0.expose-sharded-3967 -4nd + local command=find + local uri=myApp:myPass@some-name-rs0-1.some-name-rs0.expose-sharded-3967 + local postfix=-4nd + local suffix= + local database=myApp + local collection=test + local sort= + local 'full_command=db.test.find()' + [[ ! 
-z '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-06-09T03:14:01+0000] running db.test.find() in myApp + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-1.some-name-rs0.expose-sharded-3967 mongodb '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-1.some-name-rs0.expose-sharded-3967 + local driver=mongodb + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ local LAST_OUT=/tmp/tmp.a6nxdZiuoK +++ mktemp ++ local LAST_ERR=/tmp/tmp.Foeny7l2WO ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.a6nxdZiuoK ++ cat /tmp/tmp.Foeny7l2WO ++ rm /tmp/tmp.a6nxdZiuoK /tmp/tmp.Foeny7l2WO ++ return 0 + local client_container=psmdb-client-66f577db5f-bpv5m + local mongo_flag= + [[ myApp:myPass@some-name-rs0-1.some-name-rs0.expose-sharded-3967 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-66f577db5f-bpv5m -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-1.some-name-rs0.expose-sharded-3967.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.SyIZHPlgM5 ++ mktemp + local LAST_ERR=/tmp/tmp.fan1RsxzJT + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-66f577db5f-bpv5m -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-1.some-name-rs0.expose-sharded-3967.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.SyIZHPlgM5 + cat /tmp/tmp.fan1RsxzJT + rm /tmp/tmp.SyIZHPlgM5 /tmp/tmp.fan1RsxzJT + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1965/e2e-tests/expose-sharded/compare/find-4nd.json /tmp/tmp.TQYCeunbst/find-4nd + compare_mongo_cmd find myApp:myPass@some-name-rs0-2.some-name-rs0.expose-sharded-3967 -4nd + local command=find + local uri=myApp:myPass@some-name-rs0-2.some-name-rs0.expose-sharded-3967 + local postfix=-4nd + local suffix= + local database=myApp + local collection=test + local sort= + local 'full_command=db.test.find()' + [[ ! 
-z '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-06-09T03:14:04+0000] running db.test.find() in myApp + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-2.some-name-rs0.expose-sharded-3967 mongodb '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-2.some-name-rs0.expose-sharded-3967 + local driver=mongodb + local suffix=.svc.cluster.local + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.fGByvOBZxD +++ mktemp ++ local LAST_ERR=/tmp/tmp.xccDplkNKo ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.fGByvOBZxD ++ cat /tmp/tmp.xccDplkNKo ++ rm /tmp/tmp.fGByvOBZxD /tmp/tmp.xccDplkNKo ++ return 0 + local client_container=psmdb-client-66f577db5f-bpv5m + local mongo_flag= + [[ myApp:myPass@some-name-rs0-2.some-name-rs0.expose-sharded-3967 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-66f577db5f-bpv5m -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-2.some-name-rs0.expose-sharded-3967.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.xwOtgDeTkF ++ mktemp + local LAST_ERR=/tmp/tmp.hOACN5ixH0 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-66f577db5f-bpv5m -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-2.some-name-rs0.expose-sharded-3967.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.xwOtgDeTkF + cat /tmp/tmp.hOACN5ixH0 + rm /tmp/tmp.xwOtgDeTkF /tmp/tmp.hOACN5ixH0 + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1965/e2e-tests/expose-sharded/compare/find-4nd.json /tmp/tmp.TQYCeunbst/find-4nd + desc 'Exposed, LoadBalancer -> ClusterIP' + set +o xtrace ----------------------------------------------------------------------------------- Exposed, LoadBalancer -> ClusterIP ----------------------------------------------------------------------------------- + expose_cluster ClusterIP + expose_type=ClusterIP + expose_status=true + kubectl_bin patch psmdb some-name --type=json --patch '[ { "op": "replace", "path": "/spec/replsets/0/expose", "value": { "enabled": true, "type" : "ClusterIP" } }, { "op": "replace", "path": "/spec/sharding/mongos/expose", "value": { "type" : "ClusterIP" } }, { "op": "replace", "path": "/spec/sharding/configsvrReplSet/expose", "value": { "enabled": true, "type" : "ClusterIP" } }]' ++ mktemp + local LAST_OUT=/tmp/tmp.KAgBjAnUFe ++ mktemp + local LAST_ERR=/tmp/tmp.wJRopYJqh8 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl patch psmdb some-name --type=json --patch '[ { "op": "replace", "path": "/spec/replsets/0/expose", "value": { "enabled": true, "type" : "ClusterIP" } }, { "op": "replace", "path": "/spec/sharding/mongos/expose", "value": { "type" : "ClusterIP" } }, { "op": 
"replace", "path": "/spec/sharding/configsvrReplSet/expose", "value": { "enabled": true, "type" : "ClusterIP" } }]' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.KAgBjAnUFe perconaservermongodb.psmdb.percona.com/some-name patched + cat /tmp/tmp.wJRopYJqh8 + rm /tmp/tmp.KAgBjAnUFe /tmp/tmp.wJRopYJqh8 + return 0 + wait_for_running some-name-rs0 3 + local name=some-name-rs0 + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=some-name ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + [[ 0 -eq 2 ]] + wait_pod some-name-rs0-0 + local pod=some-name-rs0-0 + set +o xtrace waiting for pod/some-name-rs0-0 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 1 -eq 2 ]] + wait_pod some-name-rs0-1 + local pod=some-name-rs0-1 + set +o xtrace waiting for pod/some-name-rs0-1 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.QLag2h5xRp +++ mktemp ++ local LAST_ERR=/tmp/tmp.zhL8ceKMpj ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.QLag2h5xRp ++ cat /tmp/tmp.zhL8ceKMpj ++ rm /tmp/tmp.QLag2h5xRp /tmp/tmp.zhL8ceKMpj ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-rs0-2 + local pod=some-name-rs0-2 + set +o xtrace waiting for pod/some-name-rs0-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.KyYH8FeMcS +++ mktemp ++ local LAST_ERR=/tmp/tmp.lOYK0oLumo ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.KyYH8FeMcS ++ cat /tmp/tmp.lOYK0oLumo ++ rm /tmp/tmp.KyYH8FeMcS /tmp/tmp.lOYK0oLumo ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.8Wr5wg3hzd +++ mktemp ++ local LAST_ERR=/tmp/tmp.4gqyrLad5M ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.8Wr5wg3hzd ++ cat /tmp/tmp.4gqyrLad5M ++ rm /tmp/tmp.8Wr5wg3hzd /tmp/tmp.4gqyrLad5M ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness + wait_for_running some-name-cfg 3 false + local name=some-name-cfg + let last_pod=2 + local check_cluster_readyness=false + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=cfg + local cluster_name=some-name ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + [[ 0 -eq 2 ]] + wait_pod some-name-cfg-0 + local pod=some-name-cfg-0 + set +o xtrace waiting for pod/some-name-cfg-0 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 1 -eq 2 ]] + wait_pod some-name-cfg-1 + local pod=some-name-cfg-1 + set +o xtrace waiting for pod/some-name-cfg-1 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 
'jsonpath={.spec.replsets[?(@.name=="cfg")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.2CFnviSN7i +++ mktemp ++ local LAST_ERR=/tmp/tmp.qkSBl1ThX7 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.2CFnviSN7i ++ cat /tmp/tmp.qkSBl1ThX7 ++ rm /tmp/tmp.2CFnviSN7i /tmp/tmp.qkSBl1ThX7 ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-cfg-2 + local pod=some-name-cfg-2 + set +o xtrace waiting for pod/some-name-cfg-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].non_voting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.n3U7F626hu +++ mktemp ++ local LAST_ERR=/tmp/tmp.oad0BVl3fd ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].non_voting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.n3U7F626hu ++ cat /tmp/tmp.oad0BVl3fd ++ rm /tmp/tmp.n3U7F626hu /tmp/tmp.oad0BVl3fd ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.DsnnR0bUsl +++ mktemp ++ local LAST_ERR=/tmp/tmp.Javz5OnwlI ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.DsnnR0bUsl ++ cat /tmp/tmp.Javz5OnwlI ++ rm /tmp/tmp.DsnnR0bUsl /tmp/tmp.Javz5OnwlI ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ false == \t\r\u\e ]] + wait_for_running some-name-mongos 3 + local name=some-name-mongos + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=mongos + local cluster_name=some-name ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + [[ 0 -eq 2 ]] + wait_pod some-name-mongos-0 + local pod=some-name-mongos-0 + set +o xtrace waiting for pod/some-name-mongos-0 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 1 -eq 2 ]] + wait_pod some-name-mongos-1 + local pod=some-name-mongos-1 + set +o xtrace waiting for pod/some-name-mongos-1 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.lxzXxbviDZ +++ mktemp ++ local LAST_ERR=/tmp/tmp.tn19LUuYRi ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.lxzXxbviDZ ++ cat /tmp/tmp.tn19LUuYRi ++ rm /tmp/tmp.lxzXxbviDZ /tmp/tmp.tn19LUuYRi ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-mongos-2 + local pod=some-name-mongos-2 + set +o xtrace waiting for pod/some-name-mongos-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].non_voting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.GR4JarCTgX +++ mktemp ++ local LAST_ERR=/tmp/tmp.qpD70xnfGh ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 
'jsonpath={.spec.replsets[?(@.name=="mongos")].non_voting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.GR4JarCTgX ++ cat /tmp/tmp.qpD70xnfGh ++ rm /tmp/tmp.GR4JarCTgX /tmp/tmp.qpD70xnfGh ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.VXaPMd4keW +++ mktemp ++ local LAST_ERR=/tmp/tmp.OFLXRynNtB ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.VXaPMd4keW ++ cat /tmp/tmp.OFLXRynNtB ++ rm /tmp/tmp.VXaPMd4keW /tmp/tmp.OFLXRynNtB ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness + wait_cluster_consistency some-name + local cluster_name=some-name + local wait_time=32 + retry=0 + sleep 7 + echo -n 'waiting for cluster readyness' waiting for cluster readyness++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.nsfTK1sHZ2 +++ mktemp ++ local LAST_ERR=/tmp/tmp.8smNLGnS1W ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.nsfTK1sHZ2 ++ cat /tmp/tmp.8smNLGnS1W ++ rm /tmp/tmp.nsfTK1sHZ2 /tmp/tmp.8smNLGnS1W ++ return 0 + [[ ready == \r\e\a\d\y ]] + echo + run_mongos 'use myApp\n db.test.insert({ x: 100504 })' myApp:myPass@some-name-mongos.expose-sharded-3967 + local 'command=use myApp\n db.test.insert({ x: 100504 })' + local uri=myApp:myPass@some-name-mongos.expose-sharded-3967 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local port=27017 + local mongo_bin=mongo ++ echo .svc.cluster.local ++ awk -F: '{print $2}' + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.MIWOqY0oct +++ mktemp ++ local LAST_ERR=/tmp/tmp.pwgHNsSLFB ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.MIWOqY0oct ++ cat /tmp/tmp.pwgHNsSLFB ++ rm /tmp/tmp.MIWOqY0oct /tmp/tmp.pwgHNsSLFB ++ return 0 + local client_container=psmdb-client-66f577db5f-bpv5m + kubectl_bin exec psmdb-client-66f577db5f-bpv5m -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100504 })\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.expose-sharded-3967.svc.cluster.local:27017/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.vyit1S6AMT ++ mktemp + local LAST_ERR=/tmp/tmp.uuBnnKqMRv + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-66f577db5f-bpv5m -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100504 })\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.expose-sharded-3967.svc.cluster.local:27017/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.vyit1S6AMT Percona Server for MongoDB shell version v4.4.29-28 connecting to: 
mongodb://some-name-mongos.expose-sharded-3967.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb Implicit session: session { "id" : UUID("cffbc66f-8fce-40ad-838b-6aec72982afb") } Percona Server for MongoDB server version: v7.0.18-11 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.uuBnnKqMRv + rm /tmp/tmp.vyit1S6AMT /tmp/tmp.uuBnnKqMRv + return 0 + compare_mongos_cmd find myApp:myPass@some-name-mongos.expose-sharded-3967 -5nd + local command=find + local uri=myApp:myPass@some-name-mongos.expose-sharded-3967 + local postfix=-5nd + local suffix= + local database=myApp + local collection=test + local port=27017 + run_mongos 'use myApp\n db.test.find()' myApp:myPass@some-name-mongos.expose-sharded-3967 mongodb '' '' 27017 + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-mongos.expose-sharded-3967 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local port=27017 + local mongo_bin=mongo + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ awk -F: '{print $2}' ++ echo .svc.cluster.local + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 + egrep -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.6TyCluFOv5 +++ mktemp ++ local LAST_ERR=/tmp/tmp.fm5gl4rM1e ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.6TyCluFOv5 ++ cat /tmp/tmp.fm5gl4rM1e ++ rm /tmp/tmp.6TyCluFOv5 /tmp/tmp.fm5gl4rM1e ++ return 0 + local client_container=psmdb-client-66f577db5f-bpv5m + kubectl_bin exec psmdb-client-66f577db5f-bpv5m -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.expose-sharded-3967.svc.cluster.local:27017/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.puCyOi0RCQ ++ mktemp + local LAST_ERR=/tmp/tmp.2EX7qLnEHu + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-66f577db5f-bpv5m -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.expose-sharded-3967.svc.cluster.local:27017/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.puCyOi0RCQ + cat /tmp/tmp.2EX7qLnEHu + rm /tmp/tmp.puCyOi0RCQ /tmp/tmp.2EX7qLnEHu + return 0 + [[ 0 -eq 0 ]] + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1965/e2e-tests/expose-sharded/compare/find-5nd.json /tmp/tmp.TQYCeunbst/find-5nd + compare_mongo_cmd find myApp:myPass@some-name-rs0-0.some-name-rs0.expose-sharded-3967 -5nd + local command=find + local uri=myApp:myPass@some-name-rs0-0.some-name-rs0.expose-sharded-3967 + local postfix=-5nd + local suffix= + local database=myApp + local collection=test + local sort= + local 'full_command=db.test.find()' + [[ ! 
-z '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-06-09T03:15:22+0000] running db.test.find() in myApp + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-0.some-name-rs0.expose-sharded-3967 mongodb '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-0.some-name-rs0.expose-sharded-3967 + local driver=mongodb + local suffix=.svc.cluster.local + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.M0wUbiVtUI +++ mktemp ++ local LAST_ERR=/tmp/tmp.qAy3yLvK6i ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.M0wUbiVtUI ++ cat /tmp/tmp.qAy3yLvK6i ++ rm /tmp/tmp.M0wUbiVtUI /tmp/tmp.qAy3yLvK6i ++ return 0 + local client_container=psmdb-client-66f577db5f-bpv5m + local mongo_flag= + [[ myApp:myPass@some-name-rs0-0.some-name-rs0.expose-sharded-3967 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-66f577db5f-bpv5m -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-0.some-name-rs0.expose-sharded-3967.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.aL8jHnoWFr ++ mktemp + local LAST_ERR=/tmp/tmp.bgPH60DPCz + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-66f577db5f-bpv5m -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-0.some-name-rs0.expose-sharded-3967.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.aL8jHnoWFr + cat /tmp/tmp.bgPH60DPCz + rm /tmp/tmp.aL8jHnoWFr /tmp/tmp.bgPH60DPCz + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1965/e2e-tests/expose-sharded/compare/find-5nd.json /tmp/tmp.TQYCeunbst/find-5nd + compare_mongo_cmd find myApp:myPass@some-name-rs0-1.some-name-rs0.expose-sharded-3967 -5nd + local command=find + local uri=myApp:myPass@some-name-rs0-1.some-name-rs0.expose-sharded-3967 + local postfix=-5nd + local suffix= + local database=myApp + local collection=test + local sort= + local 'full_command=db.test.find()' + [[ ! 
-z '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-06-09T03:15:24+0000] running db.test.find() in myApp + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-1.some-name-rs0.expose-sharded-3967 mongodb '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-1.some-name-rs0.expose-sharded-3967 + local driver=mongodb + local suffix=.svc.cluster.local + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.5GVfIpsMld + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' +++ mktemp ++ local LAST_ERR=/tmp/tmp.5KdfAuwWlL ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.5GVfIpsMld ++ cat /tmp/tmp.5KdfAuwWlL ++ rm /tmp/tmp.5GVfIpsMld /tmp/tmp.5KdfAuwWlL ++ return 0 + local client_container=psmdb-client-66f577db5f-bpv5m + local mongo_flag= + [[ myApp:myPass@some-name-rs0-1.some-name-rs0.expose-sharded-3967 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-66f577db5f-bpv5m -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-1.some-name-rs0.expose-sharded-3967.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.1Nc9er39Xa ++ mktemp + local LAST_ERR=/tmp/tmp.fkiRHXIhSi + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-66f577db5f-bpv5m -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-1.some-name-rs0.expose-sharded-3967.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.1Nc9er39Xa + cat /tmp/tmp.fkiRHXIhSi + rm /tmp/tmp.1Nc9er39Xa /tmp/tmp.fkiRHXIhSi + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1965/e2e-tests/expose-sharded/compare/find-5nd.json /tmp/tmp.TQYCeunbst/find-5nd + compare_mongo_cmd find myApp:myPass@some-name-rs0-2.some-name-rs0.expose-sharded-3967 -5nd + local command=find + local uri=myApp:myPass@some-name-rs0-2.some-name-rs0.expose-sharded-3967 + local postfix=-5nd + local suffix= + local database=myApp + local collection=test + local sort= + local 'full_command=db.test.find()' + [[ ! 
-z '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-06-09T03:15:26+0000] running db.test.find() in myApp + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-2.some-name-rs0.expose-sharded-3967 mongodb '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-2.some-name-rs0.expose-sharded-3967 + local driver=mongodb + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' ++ local LAST_OUT=/tmp/tmp.l72AHt6dMV +++ mktemp ++ local LAST_ERR=/tmp/tmp.GvfNPDfrtw ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.l72AHt6dMV ++ cat /tmp/tmp.GvfNPDfrtw ++ rm /tmp/tmp.l72AHt6dMV /tmp/tmp.GvfNPDfrtw ++ return 0 + local client_container=psmdb-client-66f577db5f-bpv5m + local mongo_flag= + [[ myApp:myPass@some-name-rs0-2.some-name-rs0.expose-sharded-3967 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-66f577db5f-bpv5m -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-2.some-name-rs0.expose-sharded-3967.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.IGtRBBuW28 ++ mktemp + local LAST_ERR=/tmp/tmp.bNhk0ZsScq + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-66f577db5f-bpv5m -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-2.some-name-rs0.expose-sharded-3967.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.IGtRBBuW28 + cat /tmp/tmp.bNhk0ZsScq + rm /tmp/tmp.IGtRBBuW28 /tmp/tmp.bNhk0ZsScq + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1965/e2e-tests/expose-sharded/compare/find-5nd.json /tmp/tmp.TQYCeunbst/find-5nd + desc 'Exposed -> Unexposed' + set +o xtrace ----------------------------------------------------------------------------------- Exposed -> Unexposed ----------------------------------------------------------------------------------- + expose_cluster ClusterIP false + expose_type=ClusterIP + expose_status=false + kubectl_bin patch psmdb some-name --type=json --patch '[ { "op": "replace", "path": "/spec/replsets/0/expose", "value": { "enabled": false, "type" : "ClusterIP" } }, { "op": "replace", "path": "/spec/sharding/mongos/expose", "value": { "type" : "ClusterIP" } }, { "op": "replace", "path": "/spec/sharding/configsvrReplSet/expose", "value": { "enabled": false, "type" : "ClusterIP" } }]' ++ mktemp + local LAST_OUT=/tmp/tmp.BNSpvMdInw ++ mktemp + local LAST_ERR=/tmp/tmp.FQhqKcWnfV + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl patch psmdb some-name --type=json --patch '[ { "op": "replace", "path": "/spec/replsets/0/expose", "value": { "enabled": false, "type" : "ClusterIP" } }, { "op": "replace", "path": "/spec/sharding/mongos/expose", "value": { "type" : "ClusterIP" } }, { "op": "replace", 
"path": "/spec/sharding/configsvrReplSet/expose", "value": { "enabled": false, "type" : "ClusterIP" } }]' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.BNSpvMdInw perconaservermongodb.psmdb.percona.com/some-name patched + cat /tmp/tmp.FQhqKcWnfV + rm /tmp/tmp.BNSpvMdInw /tmp/tmp.FQhqKcWnfV + return 0 + wait_for_running some-name-rs0 3 + local name=some-name-rs0 + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=some-name ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + [[ 0 -eq 2 ]] + wait_pod some-name-rs0-0 + local pod=some-name-rs0-0 + set +o xtrace waiting for pod/some-name-rs0-0 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 1 -eq 2 ]] + wait_pod some-name-rs0-1 + local pod=some-name-rs0-1 + set +o xtrace waiting for pod/some-name-rs0-1 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.LjJUthgfcO +++ mktemp ++ local LAST_ERR=/tmp/tmp.4NV7IzRaN4 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.LjJUthgfcO ++ cat /tmp/tmp.4NV7IzRaN4 ++ rm /tmp/tmp.LjJUthgfcO /tmp/tmp.4NV7IzRaN4 ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-rs0-2 + local pod=some-name-rs0-2 + set +o xtrace waiting for pod/some-name-rs0-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.dLRHwW73pu +++ mktemp ++ local LAST_ERR=/tmp/tmp.Zqbee2A66Z ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.dLRHwW73pu ++ cat /tmp/tmp.Zqbee2A66Z ++ rm /tmp/tmp.dLRHwW73pu /tmp/tmp.Zqbee2A66Z ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Qkac4BPGnW +++ mktemp ++ local LAST_ERR=/tmp/tmp.rX8lQPQFqy ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Qkac4BPGnW ++ cat /tmp/tmp.rX8lQPQFqy ++ rm /tmp/tmp.Qkac4BPGnW /tmp/tmp.rX8lQPQFqy ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness + wait_for_running some-name-cfg 3 false + local name=some-name-cfg + let last_pod=2 + local check_cluster_readyness=false + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=cfg + local cluster_name=some-name ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + [[ 0 -eq 2 ]] + wait_pod some-name-cfg-0 + local pod=some-name-cfg-0 + set +o xtrace waiting for pod/some-name-cfg-0 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 1 -eq 2 ]] + wait_pod some-name-cfg-1 + local pod=some-name-cfg-1 + set +o xtrace waiting for pod/some-name-cfg-1 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 
'jsonpath={.spec.replsets[?(@.name=="cfg")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.I19JV3DKrQ +++ mktemp ++ local LAST_ERR=/tmp/tmp.frxcfc0Zkz ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.I19JV3DKrQ ++ cat /tmp/tmp.frxcfc0Zkz ++ rm /tmp/tmp.I19JV3DKrQ /tmp/tmp.frxcfc0Zkz ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-cfg-2 + local pod=some-name-cfg-2 + set +o xtrace waiting for pod/some-name-cfg-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].non_voting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.gMo9kexFyM +++ mktemp ++ local LAST_ERR=/tmp/tmp.Ie2CRT2LUB ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].non_voting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.gMo9kexFyM ++ cat /tmp/tmp.Ie2CRT2LUB ++ rm /tmp/tmp.gMo9kexFyM /tmp/tmp.Ie2CRT2LUB ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.HkrWV9bnlG +++ mktemp ++ local LAST_ERR=/tmp/tmp.uFfLNG44nU ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.HkrWV9bnlG ++ cat /tmp/tmp.uFfLNG44nU ++ rm /tmp/tmp.HkrWV9bnlG /tmp/tmp.uFfLNG44nU ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ false == \t\r\u\e ]] + wait_for_running some-name-mongos 3 + local name=some-name-mongos + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=mongos + local cluster_name=some-name ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + [[ 0 -eq 2 ]] + wait_pod some-name-mongos-0 + local pod=some-name-mongos-0 + set +o xtrace waiting for pod/some-name-mongos-0 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 1 -eq 2 ]] + wait_pod some-name-mongos-1 + local pod=some-name-mongos-1 + set +o xtrace waiting for pod/some-name-mongos-1 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.tAMG4KXmUQ +++ mktemp ++ local LAST_ERR=/tmp/tmp.xOJatAWDL6 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.tAMG4KXmUQ ++ cat /tmp/tmp.xOJatAWDL6 ++ rm /tmp/tmp.tAMG4KXmUQ /tmp/tmp.xOJatAWDL6 ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-mongos-2 + local pod=some-name-mongos-2 + set +o xtrace waiting for pod/some-name-mongos-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].non_voting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.XcMf3KS4o3 +++ mktemp ++ local LAST_ERR=/tmp/tmp.DVHlvd12jn ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 
'jsonpath={.spec.replsets[?(@.name=="mongos")].non_voting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.XcMf3KS4o3 ++ cat /tmp/tmp.DVHlvd12jn ++ rm /tmp/tmp.XcMf3KS4o3 /tmp/tmp.DVHlvd12jn ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.mxjN511L85 +++ mktemp ++ local LAST_ERR=/tmp/tmp.Vicsjw7jw4 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.mxjN511L85 ++ cat /tmp/tmp.Vicsjw7jw4 ++ rm /tmp/tmp.mxjN511L85 /tmp/tmp.Vicsjw7jw4 ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness + wait_cluster_consistency some-name + local cluster_name=some-name + local wait_time=32 + retry=0 + sleep 7 + echo -n 'waiting for cluster readyness' waiting for cluster readyness++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.AvoDJXODDC +++ mktemp ++ local LAST_ERR=/tmp/tmp.uk3Cl0afTd ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.AvoDJXODDC ++ cat /tmp/tmp.uk3Cl0afTd ++ rm /tmp/tmp.AvoDJXODDC /tmp/tmp.uk3Cl0afTd ++ return 0 + [[ ready == \r\e\a\d\y ]] + echo + run_mongos 'use myApp\n db.test.insert({ x: 100505 })' myApp:myPass@some-name-mongos.expose-sharded-3967 + local 'command=use myApp\n db.test.insert({ x: 100505 })' + local uri=myApp:myPass@some-name-mongos.expose-sharded-3967 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local port=27017 + local mongo_bin=mongo ++ awk -F: '{print $2}' ++ echo .svc.cluster.local + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.kOzraxAgRz +++ mktemp ++ local LAST_ERR=/tmp/tmp.h2E21W5eQB ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.kOzraxAgRz ++ cat /tmp/tmp.h2E21W5eQB ++ rm /tmp/tmp.kOzraxAgRz /tmp/tmp.h2E21W5eQB ++ return 0 + local client_container=psmdb-client-66f577db5f-bpv5m + kubectl_bin exec psmdb-client-66f577db5f-bpv5m -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100505 })\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.expose-sharded-3967.svc.cluster.local:27017/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.lGg7YWFOTc ++ mktemp + local LAST_ERR=/tmp/tmp.yZfE4FE1iA + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-66f577db5f-bpv5m -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100505 })\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.expose-sharded-3967.svc.cluster.local:27017/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.lGg7YWFOTc Percona Server for MongoDB shell version v4.4.29-28 connecting to: 
mongodb://some-name-mongos.expose-sharded-3967.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb Implicit session: session { "id" : UUID("ce7fdc35-6853-4f21-b60a-70c71b440a0f") } Percona Server for MongoDB server version: v7.0.18-11 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.yZfE4FE1iA + rm /tmp/tmp.lGg7YWFOTc /tmp/tmp.yZfE4FE1iA + return 0 + compare_mongos_cmd find myApp:myPass@some-name-mongos.expose-sharded-3967 -6nd + local command=find + local uri=myApp:myPass@some-name-mongos.expose-sharded-3967 + local postfix=-6nd + local suffix= + local database=myApp + local collection=test + local port=27017 + run_mongos 'use myApp\n db.test.find()' myApp:myPass@some-name-mongos.expose-sharded-3967 mongodb '' '' 27017 + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-mongos.expose-sharded-3967 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local port=27017 + local mongo_bin=mongo ++ echo .svc.cluster.local ++ awk -F: '{print $2}' + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.oPXmXVyl7I +++ mktemp ++ local LAST_ERR=/tmp/tmp.VDpHD2iLmy ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' + egrep -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.oPXmXVyl7I ++ cat /tmp/tmp.VDpHD2iLmy ++ rm /tmp/tmp.oPXmXVyl7I /tmp/tmp.VDpHD2iLmy ++ return 0 + local client_container=psmdb-client-66f577db5f-bpv5m + kubectl_bin exec psmdb-client-66f577db5f-bpv5m -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.expose-sharded-3967.svc.cluster.local:27017/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.wTs0EVK10P ++ mktemp + local LAST_ERR=/tmp/tmp.UunvzH5d9b + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-66f577db5f-bpv5m -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.expose-sharded-3967.svc.cluster.local:27017/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.wTs0EVK10P + cat /tmp/tmp.UunvzH5d9b + rm /tmp/tmp.wTs0EVK10P /tmp/tmp.UunvzH5d9b + return 0 + [[ 0 -eq 0 ]] + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1965/e2e-tests/expose-sharded/compare/find-6nd.json /tmp/tmp.TQYCeunbst/find-6nd + compare_mongo_cmd find myApp:myPass@some-name-rs0-0.some-name-rs0.expose-sharded-3967 -6nd + local command=find + local uri=myApp:myPass@some-name-rs0-0.some-name-rs0.expose-sharded-3967 + local postfix=-6nd + local suffix= + local database=myApp + local collection=test + local sort= + local 'full_command=db.test.find()' + [[ ! 
-z '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-06-09T03:16:44+0000] running db.test.find() in myApp + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-0.some-name-rs0.expose-sharded-3967 mongodb '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-0.some-name-rs0.expose-sharded-3967 + local driver=mongodb + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.SOyOboT7E1 +++ mktemp ++ local LAST_ERR=/tmp/tmp.RGjeDTgmDY ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.SOyOboT7E1 ++ cat /tmp/tmp.RGjeDTgmDY ++ rm /tmp/tmp.SOyOboT7E1 /tmp/tmp.RGjeDTgmDY ++ return 0 + local client_container=psmdb-client-66f577db5f-bpv5m + local mongo_flag= + [[ myApp:myPass@some-name-rs0-0.some-name-rs0.expose-sharded-3967 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-66f577db5f-bpv5m -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-0.some-name-rs0.expose-sharded-3967.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.RtRQ5nywQl ++ mktemp + local LAST_ERR=/tmp/tmp.DJUoHzEQXD + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-66f577db5f-bpv5m -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-0.some-name-rs0.expose-sharded-3967.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.RtRQ5nywQl + cat /tmp/tmp.DJUoHzEQXD + rm /tmp/tmp.RtRQ5nywQl /tmp/tmp.DJUoHzEQXD + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1965/e2e-tests/expose-sharded/compare/find-6nd.json /tmp/tmp.TQYCeunbst/find-6nd + compare_mongo_cmd find myApp:myPass@some-name-rs0-1.some-name-rs0.expose-sharded-3967 -6nd + local command=find + local uri=myApp:myPass@some-name-rs0-1.some-name-rs0.expose-sharded-3967 + local postfix=-6nd + local suffix= + local database=myApp + local collection=test + local sort= + local 'full_command=db.test.find()' + [[ ! 
-z '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-06-09T03:16:46+0000] running db.test.find() in myApp + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-1.some-name-rs0.expose-sharded-3967 mongodb '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-1.some-name-rs0.expose-sharded-3967 + local driver=mongodb + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + local suffix=.svc.cluster.local + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.E9KqHHtDij +++ mktemp ++ local LAST_ERR=/tmp/tmp.0hgrBjFZVU ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.E9KqHHtDij ++ cat /tmp/tmp.0hgrBjFZVU ++ rm /tmp/tmp.E9KqHHtDij /tmp/tmp.0hgrBjFZVU ++ return 0 + local client_container=psmdb-client-66f577db5f-bpv5m + local mongo_flag= + [[ myApp:myPass@some-name-rs0-1.some-name-rs0.expose-sharded-3967 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-66f577db5f-bpv5m -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-1.some-name-rs0.expose-sharded-3967.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.7pJHnBOhLx ++ mktemp + local LAST_ERR=/tmp/tmp.Wg6hyD96KV + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-66f577db5f-bpv5m -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-1.some-name-rs0.expose-sharded-3967.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.7pJHnBOhLx + cat /tmp/tmp.Wg6hyD96KV + rm /tmp/tmp.7pJHnBOhLx /tmp/tmp.Wg6hyD96KV + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1965/e2e-tests/expose-sharded/compare/find-6nd.json /tmp/tmp.TQYCeunbst/find-6nd + compare_mongo_cmd find myApp:myPass@some-name-rs0-2.some-name-rs0.expose-sharded-3967 -6nd + local command=find + local uri=myApp:myPass@some-name-rs0-2.some-name-rs0.expose-sharded-3967 + local postfix=-6nd + local suffix= + local database=myApp + local collection=test + local sort= + local 'full_command=db.test.find()' + [[ ! 
-z '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-06-09T03:16:49+0000] running db.test.find() in myApp + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-2.some-name-rs0.expose-sharded-3967 mongodb '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-2.some-name-rs0.expose-sharded-3967 + local driver=mongodb + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.xse4VliKW2 +++ mktemp ++ local LAST_ERR=/tmp/tmp.eGUrjTJOSW ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.xse4VliKW2 ++ cat /tmp/tmp.eGUrjTJOSW ++ rm /tmp/tmp.xse4VliKW2 /tmp/tmp.eGUrjTJOSW ++ return 0 + local client_container=psmdb-client-66f577db5f-bpv5m + local mongo_flag= + [[ myApp:myPass@some-name-rs0-2.some-name-rs0.expose-sharded-3967 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-66f577db5f-bpv5m -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-2.some-name-rs0.expose-sharded-3967.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.NqimC3JpPb ++ mktemp + local LAST_ERR=/tmp/tmp.jctBiOehLW + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-66f577db5f-bpv5m -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-2.some-name-rs0.expose-sharded-3967.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.NqimC3JpPb + cat /tmp/tmp.jctBiOehLW + rm /tmp/tmp.NqimC3JpPb /tmp/tmp.jctBiOehLW + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1965/e2e-tests/expose-sharded/compare/find-6nd.json /tmp/tmp.TQYCeunbst/find-6nd + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1965/e2e-tests/conf/container-rc.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.UXPjI0rQbT ++ mktemp + local LAST_ERR=/tmp/tmp.NUTDkQwUEa + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1965/e2e-tests/conf/container-rc.yaml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.UXPjI0rQbT runtimeclass.node.k8s.io "container-rc" deleted + cat /tmp/tmp.NUTDkQwUEa + rm /tmp/tmp.UXPjI0rQbT /tmp/tmp.NUTDkQwUEa + return 0 + destroy expose-sharded-3967 + local namespace=expose-sharded-3967 + local ignore_logs=true + [[ 0 == 1 ]] + desc 'destroy cluster/operator and all other resources' + set +o xtrace ----------------------------------------------------------------------------------- destroy cluster/operator and all other resources ----------------------------------------------------------------------------------- + '[' true == false ']' + delete_backups + desc 'Delete psmdb-backup' + set +o xtrace 
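Before the teardown below, note the JSON-patch shape this section used to toggle service exposure. A standalone equivalent of the final "Exposed -> Unexposed" patch, with the same resource name and paths as the trace:

kubectl patch psmdb some-name --type=json --patch '[
  {"op": "replace", "path": "/spec/replsets/0/expose",
   "value": {"enabled": false, "type": "ClusterIP"}},
  {"op": "replace", "path": "/spec/sharding/mongos/expose",
   "value": {"type": "ClusterIP"}},
  {"op": "replace", "path": "/spec/sharding/configsvrReplSet/expose",
   "value": {"enabled": false, "type": "ClusterIP"}}
]'

As in the trace, only the replset and configsvr expose blocks carry an enabled flag; the mongos block only switches the Service type.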
----------------------------------------------------------------------------------- Delete psmdb-backup ----------------------------------------------------------------------------------- ++ wc -l ++ kubectl_bin get psmdb-backup --no-headers +++ mktemp ++ local LAST_OUT=/tmp/tmp.JfoMGK4OYS +++ mktemp ++ local LAST_ERR=/tmp/tmp.LIczhyDBNU ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb-backup --no-headers ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.JfoMGK4OYS ++ cat /tmp/tmp.LIczhyDBNU No resources found in expose-sharded-3967 namespace. ++ rm /tmp/tmp.JfoMGK4OYS /tmp/tmp.LIczhyDBNU ++ return 0 + '[' 0 '!=' 0 ']' + delete_crd + desc 'get and delete old CRDs and RBAC' + set +o xtrace ----------------------------------------------------------------------------------- get and delete old CRDs and RBAC ----------------------------------------------------------------------------------- + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1965/deploy/crd.yaml --ignore-not-found --wait=false ++ mktemp + local LAST_OUT=/tmp/tmp.NmbKAWVOC7 ++ mktemp + local LAST_ERR=/tmp/tmp.zOQF5OSFo5 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1965/deploy/crd.yaml --ignore-not-found --wait=false + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.NmbKAWVOC7 customresourcedefinition.apiextensions.k8s.io "perconaservermongodbbackups.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbrestores.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbs.psmdb.percona.com" deleted + cat /tmp/tmp.zOQF5OSFo5 + rm /tmp/tmp.NmbKAWVOC7 /tmp/tmp.zOQF5OSFo5 + return 0 ++ grep -v '\-\-\-' ++ yq eval .metadata.name /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1965/deploy/crd.yaml + for crd_name in '$(yq eval '\''.metadata.name'\'' "${src_dir}/deploy/crd.yaml" | grep -v '\''\-\-\-'\'')' + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbbackups.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' + kubectl get perconaservermongodbbackups.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE error: the server doesn't have a resource type "perconaservermongodbbackups" + kubectl patch perconaservermongodbbackups.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbbackups" + : + kubectl_bin wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.SlBeUf07IS ++ mktemp + local LAST_ERR=/tmp/tmp.EeC2yM3yvx + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.SlBeUf07IS + cat /tmp/tmp.EeC2yM3yvx + rm /tmp/tmp.SlBeUf07IS /tmp/tmp.EeC2yM3yvx + return 0 + for crd_name in '$(yq eval '\''.metadata.name'\'' "${src_dir}/deploy/crd.yaml" | grep -v '\''\-\-\-'\'')' + kubectl get perconaservermongodbrestores.psmdb.percona.com --all-namespaces -o wide + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbrestores.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' + grep -v NAMESPACE error: the server doesn't have a resource type 
"perconaservermongodbrestores" + kubectl patch perconaservermongodbrestores.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbrestores" + : + kubectl_bin wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.ESsOpmur6R ++ mktemp + local LAST_ERR=/tmp/tmp.Ba8FWAn5zF + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.ESsOpmur6R + cat /tmp/tmp.Ba8FWAn5zF + rm /tmp/tmp.ESsOpmur6R /tmp/tmp.Ba8FWAn5zF + return 0 + for crd_name in '$(yq eval '\''.metadata.name'\'' "${src_dir}/deploy/crd.yaml" | grep -v '\''\-\-\-'\'')' + kubectl get perconaservermongodbs.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbs.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbs" + kubectl patch perconaservermongodbs.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbs" + : + kubectl_bin wait --for=delete crd perconaservermongodbs.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.YquYaluucS ++ mktemp + local LAST_ERR=/tmp/tmp.Kvsagd4eiU + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=delete crd perconaservermongodbs.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.YquYaluucS + cat /tmp/tmp.Kvsagd4eiU + rm /tmp/tmp.YquYaluucS /tmp/tmp.Kvsagd4eiU + return 0 + local rbac_yaml=rbac.yaml + '[' -n psmdb-operator ']' + rbac_yaml=cw-rbac.yaml + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1965/deploy/cw-rbac.yaml --ignore-not-found ++ mktemp + local LAST_OUT=/tmp/tmp.sSojhJjhVb ++ mktemp + local LAST_ERR=/tmp/tmp.bXaoopU6Ay + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1965/deploy/cw-rbac.yaml --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.sSojhJjhVb clusterrole.rbac.authorization.k8s.io "percona-server-mongodb-operator" deleted clusterrolebinding.rbac.authorization.k8s.io "service-account-percona-server-mongodb-operator" deleted + cat /tmp/tmp.bXaoopU6Ay + rm /tmp/tmp.sSojhJjhVb /tmp/tmp.bXaoopU6Ay + return 0 + destroy_cert_manager + kubectl_bin delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.GirwTdXyIV ++ mktemp + local LAST_ERR=/tmp/tmp.Py778OJqLH + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.GirwTdXyIV + cat /tmp/tmp.Py778OJqLH Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": namespaces "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": 
customresourcedefinitions.apiextensions.k8s.io "certificaterequests.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificates.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "challenges.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "clusterissuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "issuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "orders.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": serviceaccounts "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": serviceaccounts "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": serviceaccounts "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": 
clusterroles.rbac.authorization.k8s.io "cert-manager-cluster-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-edit" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io 
"cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": services "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": services "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": services "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": deployments.apps "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": deployments.apps "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": deployments.apps "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": mutatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": validatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found + sleep 0 + for i in '$(seq 0 2)' + set +e + kubectl delete -f 
+ sleep 0
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml
+ exit_status=1
+ set -e
+ '[' 1 '!=' 0 -a -n 1 ']'
+ cat /tmp/tmp.GirwTdXyIV
+ cat /tmp/tmp.Py778OJqLH
[second attempt: the same "NotFound" errors as the first attempt, elided]
+ sleep 4
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml
+ exit_status=1
+ set -e
+ '[' 1 '!=' 0 -a -n 1 ']'
+ cat /tmp/tmp.GirwTdXyIV
+ cat /tmp/tmp.Py778OJqLH
[third attempt: the same "NotFound" errors as the first attempt, elided]
+ sleep 8
+ cat /tmp/tmp.GirwTdXyIV
+ cat /tmp/tmp.Py778OJqLH
[final output dump after the retries were exhausted: the same "NotFound" errors as the first attempt, elided]
+ rm /tmp/tmp.GirwTdXyIV /tmp/tmp.Py778OJqLH
+ return 1
+ true
+ '[' -n '' ']'
+ '[' -n psmdb-operator ']'
+ kubectl_bin delete --grace-period=0 --force=true namespace expose-sharded-3967
+ rm -rf /tmp/tmp.TQYCeunbst
++ mktemp
+ kubectl_bin delete --grace-period=0 --force=true namespace psmdb-operator
++ mktemp
+ local LAST_OUT=/tmp/tmp.X57Cr0rmiU
+ local LAST_OUT=/tmp/tmp.wL7xcZIBUm
++ mktemp
++ mktemp
+ local LAST_ERR=/tmp/tmp.WjOw7dWWFq
+ local exit_status=0
+ local timeout=4
+ local LAST_ERR=/tmp/tmp.cuHZ1OFUTa
+ local exit_status=0
+ local timeout=4
++ seq 0 2
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl delete --grace-period=0 --force=true namespace expose-sharded-3967
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl delete --grace-period=0 --force=true namespace psmdb-operator
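Note: "+ return 1" immediately followed by "+ true" above means the teardown deliberately swallows the failed cert-manager removal (cert-manager was never installed in this run). A sketch of the destroy tail consistent with the trace; namespace is taken from the trace, while OPERATOR_NS, tmp_dir and the backgrounded deletes are assumptions inferred from the interleaved output:

destroy_cert_manager || true   # NotFound errors must not fail the teardown

# force-delete the test namespace; the interleaved trace suggests the
# operator namespace is deleted in parallel, in the background
kubectl_bin delete --grace-period=0 --force=true namespace "$namespace" &
rm -rf "$tmp_dir"   # per-run scratch dir, /tmp/tmp.TQYCeunbst in this run
if [ -n "$OPERATOR_NS" ]; then
    kubectl_bin delete --grace-period=0 --force=true namespace "$OPERATOR_NS" &
fi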