++ echo 'Log: /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1612/e2e-tests/logs/expose-sharded.log' Log: /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1612/e2e-tests/logs/expose-sharded.log ++ '[' -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1612/e2e-tests/conf/cloud-secret.yml ']' ++ SKIP_BACKUPS_TO_AWS_GCP_AZURE= ++ oc get projects ++ kubectl get nodes ++ grep '^minikube' +++ kubectl version -o json +++ jq -r .serverVersion.gitVersion +++ grep '\-eks\-' WARNING: version difference between client (1.30) and server (1.27) exceeds the supported minor version skew of +/-1 ++ '[' ']' ++ EKS=0 +++ kubectl version -o json +++ jq -r .serverVersion.gitVersion +++ grep gke WARNING: version difference between client (1.30) and server (1.27) exceeds the supported minor version skew of +/-1 ++ '[' v1.27.16-gke.1008000 ']' ++ GKE=1 +++ /usr/bin/sed -r 's/[^0-9.]+//g' +++ kubectl version -o json +++ jq -r '.serverVersion.major + "." + .serverVersion.minor' WARNING: version difference between client (1.30) and server (1.27) exceeds the supported minor version skew of +/-1 ++ KUBE_VERSION=1.27 + main + create_infra expose-sharded-10972 + local ns=expose-sharded-10972 + delete_crd + desc 'get and delete old CRDs and RBAC' + set +o xtrace ----------------------------------------------------------------------------------- get and delete old CRDs and RBAC ----------------------------------------------------------------------------------- + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1612/deploy/crd.yaml --ignore-not-found --wait=false ++ mktemp + local LAST_OUT=/tmp/tmp.9XyShppacd ++ mktemp + local LAST_ERR=/tmp/tmp.W2oE5KSI8M + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1612/deploy/crd.yaml --ignore-not-found --wait=false + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.9XyShppacd customresourcedefinition.apiextensions.k8s.io "perconaservermongodbbackups.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbrestores.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbs.psmdb.percona.com" deleted + cat /tmp/tmp.W2oE5KSI8M + rm /tmp/tmp.9XyShppacd /tmp/tmp.W2oE5KSI8M + return 0 ++ yq eval .metadata.name /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1612/deploy/crd.yaml ++ grep -v '\-\-\-' + for crd_name in '$(yq eval '\''.metadata.name'\'' "${src_dir}/deploy/crd.yaml" | grep -v '\''\-\-\-'\'')' + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbbackups.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' + grep -v NAMESPACE + kubectl get perconaservermongodbbackups.psmdb.percona.com --all-namespaces -o wide E0731 15:16:42.633557 20910 memcache.go:287] couldn't get resource list for metrics.k8s.io/v1beta1: the server is currently unable to handle the request E0731 15:16:42.994144 20910 memcache.go:121] couldn't get resource list for metrics.k8s.io/v1beta1: the server is currently unable to handle the request E0731 15:16:43.152456 20910 memcache.go:121] couldn't get resource list for metrics.k8s.io/v1beta1: the server is currently unable to handle the request E0731 15:16:43.274714 20910 memcache.go:121] couldn't get resource list for metrics.k8s.io/v1beta1: the server is currently unable to handle the request E0731 15:16:43.432037 20910 memcache.go:121] couldn't get resource list for metrics.k8s.io/v1beta1: the server is currently unable 
to handle the request error: the server doesn't have a resource type "perconaservermongodbbackups" + kubectl patch perconaservermongodbbackups.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' E0731 15:16:45.661717 21149 memcache.go:287] couldn't get resource list for metrics.k8s.io/v1beta1: the server is currently unable to handle the request E0731 15:16:45.942314 21149 memcache.go:121] couldn't get resource list for metrics.k8s.io/v1beta1: the server is currently unable to handle the request E0731 15:16:46.246773 21149 memcache.go:121] couldn't get resource list for metrics.k8s.io/v1beta1: the server is currently unable to handle the request E0731 15:16:46.417588 21149 memcache.go:121] couldn't get resource list for metrics.k8s.io/v1beta1: the server is currently unable to handle the request E0731 15:16:46.694373 21149 memcache.go:121] couldn't get resource list for metrics.k8s.io/v1beta1: the server is currently unable to handle the request error: the server doesn't have a resource type "perconaservermongodbbackups" + : + kubectl_bin wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.iz7LYuthpJ ++ mktemp + local LAST_ERR=/tmp/tmp.qdSr2i9otP + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.iz7LYuthpJ + cat /tmp/tmp.qdSr2i9otP + rm /tmp/tmp.iz7LYuthpJ /tmp/tmp.qdSr2i9otP + return 0 + for crd_name in '$(yq eval '\''.metadata.name'\'' "${src_dir}/deploy/crd.yaml" | grep -v '\''\-\-\-'\'')' + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbrestores.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' + kubectl get perconaservermongodbrestores.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE E0731 15:16:49.343986 21562 memcache.go:287] couldn't get resource list for metrics.k8s.io/v1beta1: the server is currently unable to handle the request E0731 15:16:49.665367 21562 memcache.go:121] couldn't get resource list for metrics.k8s.io/v1beta1: the server is currently unable to handle the request E0731 15:16:49.870658 21562 memcache.go:121] couldn't get resource list for metrics.k8s.io/v1beta1: the server is currently unable to handle the request E0731 15:16:50.059127 21562 memcache.go:121] couldn't get resource list for metrics.k8s.io/v1beta1: the server is currently unable to handle the request E0731 15:16:50.224939 21562 memcache.go:121] couldn't get resource list for metrics.k8s.io/v1beta1: the server is currently unable to handle the request error: the server doesn't have a resource type "perconaservermongodbrestores" + kubectl patch perconaservermongodbrestores.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' E0731 15:16:51.891807 21741 memcache.go:287] couldn't get resource list for metrics.k8s.io/v1beta1: the server is currently unable to handle the request E0731 15:16:52.107381 21741 memcache.go:121] couldn't get resource list for metrics.k8s.io/v1beta1: the server is currently unable to handle the request E0731 15:16:52.235869 21741 memcache.go:121] couldn't get resource list for metrics.k8s.io/v1beta1: the server is currently unable to handle the request E0731 15:16:52.360928 21741 memcache.go:121] couldn't get resource list for metrics.k8s.io/v1beta1: the server is currently unable to handle the request E0731 15:16:52.488556 21741 memcache.go:121] couldn't 
get resource list for metrics.k8s.io/v1beta1: the server is currently unable to handle the request error: the server doesn't have a resource type "perconaservermongodbrestores" + : + kubectl_bin wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.o8xIa58oyl ++ mktemp + local LAST_ERR=/tmp/tmp.fHnjRV7Dr6 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.o8xIa58oyl + cat /tmp/tmp.fHnjRV7Dr6 + rm /tmp/tmp.o8xIa58oyl /tmp/tmp.fHnjRV7Dr6 + return 0 + for crd_name in '$(yq eval '\''.metadata.name'\'' "${src_dir}/deploy/crd.yaml" | grep -v '\''\-\-\-'\'')' + kubectl get perconaservermongodbs.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbs.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' E0731 15:16:55.370705 22215 memcache.go:287] couldn't get resource list for metrics.k8s.io/v1beta1: the server is currently unable to handle the request E0731 15:16:55.639534 22215 memcache.go:121] couldn't get resource list for metrics.k8s.io/v1beta1: the server is currently unable to handle the request E0731 15:16:55.760806 22215 memcache.go:121] couldn't get resource list for metrics.k8s.io/v1beta1: the server is currently unable to handle the request E0731 15:16:55.904140 22215 memcache.go:121] couldn't get resource list for metrics.k8s.io/v1beta1: the server is currently unable to handle the request E0731 15:16:56.025866 22215 memcache.go:121] couldn't get resource list for metrics.k8s.io/v1beta1: the server is currently unable to handle the request error: the server doesn't have a resource type "perconaservermongodbs" + kubectl patch perconaservermongodbs.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' E0731 15:16:57.368686 22416 memcache.go:287] couldn't get resource list for metrics.k8s.io/v1beta1: the server is currently unable to handle the request E0731 15:16:57.639562 22416 memcache.go:121] couldn't get resource list for metrics.k8s.io/v1beta1: the server is currently unable to handle the request E0731 15:16:57.753300 22416 memcache.go:121] couldn't get resource list for metrics.k8s.io/v1beta1: the server is currently unable to handle the request E0731 15:16:57.879008 22416 memcache.go:121] couldn't get resource list for metrics.k8s.io/v1beta1: the server is currently unable to handle the request E0731 15:16:58.000220 22416 memcache.go:121] couldn't get resource list for metrics.k8s.io/v1beta1: the server is currently unable to handle the request error: the server doesn't have a resource type "perconaservermongodbs" + : + kubectl_bin wait --for=delete crd perconaservermongodbs.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.khVryw7KkP ++ mktemp + local LAST_ERR=/tmp/tmp.VqqUzezLi5 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=delete crd perconaservermongodbs.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.khVryw7KkP + cat /tmp/tmp.VqqUzezLi5 + rm /tmp/tmp.khVryw7KkP /tmp/tmp.VqqUzezLi5 + return 0 + local rbac_yaml=rbac.yaml + '[' -n psmdb-operator ']' + rbac_yaml=cw-rbac.yaml + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1612/deploy/cw-rbac.yaml --ignore-not-found ++ mktemp + local 
LAST_OUT=/tmp/tmp.8yzUsR0gXt ++ mktemp + local LAST_ERR=/tmp/tmp.E5SJTlC8ip + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1612/deploy/cw-rbac.yaml --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.8yzUsR0gXt clusterrole.rbac.authorization.k8s.io "percona-server-mongodb-operator" deleted clusterrolebinding.rbac.authorization.k8s.io "service-account-percona-server-mongodb-operator" deleted + cat /tmp/tmp.E5SJTlC8ip + rm /tmp/tmp.8yzUsR0gXt /tmp/tmp.E5SJTlC8ip + return 0 + check_crd_for_deletion PR-1612-57a92fde + local git_tag=PR-1612-57a92fde ++ curl -s https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/PR-1612-57a92fde/deploy/crd.yaml ++ yq eval .metadata.name ++ /usr/bin/sed s/---//g ++ /usr/bin/sed ':a;N;$!ba;s/\n/ /g' + for crd_name in '$(curl -s https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/${git_tag}/deploy/crd.yaml | yq eval '\''.metadata.name'\'' | $sed '\''s/---//g'\'' | $sed '\'':a;N;$!ba;s/\n/ /g'\'')' ++ kubectl_bin get crd/null -o 'jsonpath={.status.conditions[-1].type}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.cG1Sn0CGTp +++ mktemp ++ local LAST_ERR=/tmp/tmp.iZG9uILM28 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}' ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.cG1Sn0CGTp ++ cat /tmp/tmp.iZG9uILM28 Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ sleep 0 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}' ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.cG1Sn0CGTp ++ cat /tmp/tmp.iZG9uILM28 Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ sleep 4 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}' ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.cG1Sn0CGTp ++ cat /tmp/tmp.iZG9uILM28 Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ sleep 8 ++ cat /tmp/tmp.cG1Sn0CGTp ++ cat /tmp/tmp.iZG9uILM28 Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ rm /tmp/tmp.cG1Sn0CGTp /tmp/tmp.iZG9uILM28 ++ return 1 + [[ '' == \T\e\r\m\i\n\a\t\i\n\g ]] + '[' -n psmdb-operator ']' + create_namespace psmdb-operator + local namespace=psmdb-operator + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + destroy_chaos_mesh ++ awk '-F ' '{print $2}' ++ helm list --all-namespaces --filter chaos-mesh ++ tail -n1 ++ sed s/NAMESPACE// + local chaos_mesh_ns= + desc 'destroy chaos-mesh' + set +o xtrace ----------------------------------------------------------------------------------- destroy chaos-mesh ----------------------------------------------------------------------------------- + '[' -n '' ']' ++ kubectl get MutatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete MutatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ awk '{print $1}' ++ kubectl get 
ValidatingWebhookConfiguration ++ grep validate-auth + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl api-resources ++ awk '{print $1}' ++ grep chaos-mesh E0731 15:17:22.841742 25808 memcache.go:287] couldn't get resource list for metrics.k8s.io/v1beta1: the server is currently unable to handle the request E0731 15:17:22.950404 25808 memcache.go:121] couldn't get resource list for metrics.k8s.io/v1beta1: the server is currently unable to handle the request error: unable to retrieve the complete list of server APIs: metrics.k8s.io/v1beta1: the server is currently unable to handle the request ++ kubectl get crd ++ awk '{print $1}' ++ grep chaos-mesh.org + timeout 30 kubectl delete crd error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrolebinding ++ awk '{print $1}' ++ grep chaos-mesh + timeout 30 kubectl delete clusterrolebinding error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrole ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrole error: resource(s) were provided, but no name was specified + : + desc 'cleaned up all old namespaces' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up all old namespaces ----------------------------------------------------------------------------------- + '[' -n '' ']' + desc 'cleaned up old namespaces psmdb-operator' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up old namespaces psmdb-operator ----------------------------------------------------------------------------------- + kubectl_bin delete namespace psmdb-operator --ignore-not-found ++ mktemp + xargs kubectl delete ns + kubectl_bin get ns ++ mktemp + local LAST_OUT=/tmp/tmp.mDpld2ZEE7 ++ mktemp + local LAST_ERR=/tmp/tmp.glo6FcbfMw + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get ns + awk '{print$1}' + local LAST_OUT=/tmp/tmp.c89cekncye + egrep -v '^kube-|^default|Terminating|psmdb-operator|openshift|gke-mcs|^NAME' ++ mktemp + local LAST_ERR=/tmp/tmp.XocnoJ9DK2 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete namespace psmdb-operator --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.mDpld2ZEE7 + cat /tmp/tmp.glo6FcbfMw + rm /tmp/tmp.mDpld2ZEE7 /tmp/tmp.glo6FcbfMw + return 0 namespace "expose-sharded-22849" deleted namespace "gmp-public" deleted namespace "gmp-system" deleted + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.c89cekncye namespace "psmdb-operator" deleted + cat /tmp/tmp.XocnoJ9DK2 + rm /tmp/tmp.c89cekncye /tmp/tmp.XocnoJ9DK2 + return 0 + kubectl_bin wait --for=delete namespace psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.2QKKxZQ4Rs ++ mktemp + local LAST_ERR=/tmp/tmp.mIpfP0a2qm + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=delete namespace psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.2QKKxZQ4Rs + cat /tmp/tmp.mIpfP0a2qm + rm /tmp/tmp.2QKKxZQ4Rs /tmp/tmp.mIpfP0a2qm + return 0 + desc 'create namespace psmdb-operator' + set +o xtrace ----------------------------------------------------------------------------------- create namespace psmdb-operator 
----------------------------------------------------------------------------------- + kubectl_bin create namespace psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.n9fzXOfSyO ++ mktemp + local LAST_ERR=/tmp/tmp.2oUqnwHiWs + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl create namespace psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.n9fzXOfSyO namespace/psmdb-operator created + cat /tmp/tmp.2oUqnwHiWs + rm /tmp/tmp.n9fzXOfSyO /tmp/tmp.2oUqnwHiWs + return 0 ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.KSYiscgdeV +++ mktemp ++ local LAST_ERR=/tmp/tmp.q5j802eYYx ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.KSYiscgdeV ++ cat /tmp/tmp.q5j802eYYx ++ rm /tmp/tmp.KSYiscgdeV /tmp/tmp.q5j802eYYx ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-1612-57a92fde-7-cluster4 --namespace=psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.I9a44gSBDz ++ mktemp + local LAST_ERR=/tmp/tmp.kx9gUbUqzY + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-1612-57a92fde-7-cluster4 --namespace=psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.I9a44gSBDz Context "gke_cloud-dev-112233_us-central1-a_jen-psmdb-1612-57a92fde-7-cluster4" modified. + cat /tmp/tmp.kx9gUbUqzY + rm /tmp/tmp.I9a44gSBDz /tmp/tmp.kx9gUbUqzY + return 0 + deploy_operator + desc 'start PSMDB operator' + set +o xtrace ----------------------------------------------------------------------------------- start PSMDB operator ----------------------------------------------------------------------------------- + local cr_file + '[' -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1612/e2e-tests/expose-sharded/conf/crd.yaml ']' + cr_file=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1612/deploy/crd.yaml + kubectl_bin apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1612/deploy/crd.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.8SGDl1siQO ++ mktemp + local LAST_ERR=/tmp/tmp.8rig3mp6SU + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1612/deploy/crd.yaml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.8SGDl1siQO customresourcedefinition.apiextensions.k8s.io/perconaservermongodbbackups.psmdb.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaservermongodbrestores.psmdb.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaservermongodbs.psmdb.percona.com serverside-applied + cat /tmp/tmp.8rig3mp6SU + rm /tmp/tmp.8SGDl1siQO /tmp/tmp.8rig3mp6SU + return 0 + '[' -n psmdb-operator ']' + apply_rbac cw-rbac + local operator_namespace=psmdb-operator + local rbac=cw-rbac + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1612/deploy/cw-rbac.yaml + kubectl_bin apply -n psmdb-operator -f - + sed -e 's^namespace: .*^namespace: psmdb-operator^' ++ mktemp + local LAST_OUT=/tmp/tmp.oF1CRXRher ++ mktemp + local LAST_ERR=/tmp/tmp.kk1HrKbdqG + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -n psmdb-operator -f - + 
exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.oF1CRXRher clusterrole.rbac.authorization.k8s.io/percona-server-mongodb-operator created serviceaccount/percona-server-mongodb-operator created clusterrolebinding.rbac.authorization.k8s.io/service-account-percona-server-mongodb-operator created + cat /tmp/tmp.kk1HrKbdqG + rm /tmp/tmp.oF1CRXRher /tmp/tmp.kk1HrKbdqG + return 0 + yq eval ' (.spec.template.spec.containers[].image = "perconalab/percona-server-mongodb-operator:PR-1612-57a92fde") | ((.. | select(.[] == "DISABLE_TELEMETRY")) |= .value="true") | ((.. | select(.[] == "LOG_LEVEL")) |= .value="DEBUG")' /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1612/deploy/cw-operator.yaml + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.zYlzCsnTNg ++ mktemp + local LAST_ERR=/tmp/tmp.sSdBhospP0 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.zYlzCsnTNg deployment.apps/percona-server-mongodb-operator created + cat /tmp/tmp.sSdBhospP0 + rm /tmp/tmp.zYlzCsnTNg /tmp/tmp.sSdBhospP0 + return 0 + sleep 2 ++ get_operator_pod ++ kubectl_bin get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.RqG7VL7GAl +++ mktemp ++ local LAST_ERR=/tmp/tmp.w0ka3HAguL ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.RqG7VL7GAl ++ cat /tmp/tmp.w0ka3HAguL ++ rm /tmp/tmp.RqG7VL7GAl /tmp/tmp.w0ka3HAguL ++ return 0 + wait_pod percona-server-mongodb-operator-bdc5b774b-s7m7t + local pod=percona-server-mongodb-operator-bdc5b774b-s7m7t + set +o xtrace waiting for pod/percona-server-mongodb-operator-bdc5b774b-s7m7t to be ready.OK + create_namespace expose-sharded-10972 + local namespace=expose-sharded-10972 + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + destroy_chaos_mesh ++ helm list --all-namespaces --filter chaos-mesh ++ sed s/NAMESPACE// ++ tail -n1 ++ awk '-F ' '{print $2}' + local chaos_mesh_ns= + desc 'destroy chaos-mesh' + set +o xtrace ----------------------------------------------------------------------------------- destroy chaos-mesh ----------------------------------------------------------------------------------- + '[' -n '' ']' ++ awk '{print $1}' ++ grep chaos-mesh ++ kubectl get MutatingWebhookConfiguration + timeout 30 kubectl delete MutatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ grep validate-auth ++ awk '{print $1}' ++ kubectl get ValidatingWebhookConfiguration + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl api-resources ++ grep chaos-mesh ++ awk '{print $1}' ++ kubectl get crd ++ awk '{print $1}' ++ grep chaos-mesh.org + timeout 30 kubectl delete crd error: resource(s) were provided, but no name was specified + : ++ awk '{print $1}' ++ grep chaos-mesh ++ kubectl get clusterrolebinding + timeout 30 kubectl delete clusterrolebinding error: resource(s) were 
provided, but no name was specified + : ++ grep chaos-mesh ++ awk '{print $1}' ++ kubectl get clusterrole + timeout 30 kubectl delete clusterrole error: resource(s) were provided, but no name was specified + : + desc 'cleaned up all old namespaces' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up all old namespaces ----------------------------------------------------------------------------------- + awk '{print$1}' + '[' -n '' ']' + desc 'cleaned up old namespaces expose-sharded-10972' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up old namespaces expose-sharded-10972 ----------------------------------------------------------------------------------- + kubectl_bin delete namespace expose-sharded-10972 --ignore-not-found ++ mktemp + xargs kubectl delete ns + local LAST_OUT=/tmp/tmp.EmfZEbnUsc + kubectl_bin get ns ++ mktemp ++ mktemp + local LAST_ERR=/tmp/tmp.s1MksT0bsO + local exit_status=0 + local timeout=4 + local LAST_OUT=/tmp/tmp.xIAm6ATVpR ++ seq 0 2 ++ mktemp + for i in '$(seq 0 2)' + set +e + kubectl delete namespace expose-sharded-10972 --ignore-not-found + local LAST_ERR=/tmp/tmp.9GiHx88vxj + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get ns + egrep -v '^kube-|^default|Terminating|psmdb-operator|openshift|gke-mcs|^NAME' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.xIAm6ATVpR + cat /tmp/tmp.9GiHx88vxj + rm /tmp/tmp.xIAm6ATVpR /tmp/tmp.9GiHx88vxj + return 0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.EmfZEbnUsc + cat /tmp/tmp.s1MksT0bsO + rm /tmp/tmp.EmfZEbnUsc /tmp/tmp.s1MksT0bsO + return 0 + kubectl_bin wait --for=delete namespace expose-sharded-10972 ++ mktemp + local LAST_OUT=/tmp/tmp.xWQ2O5EhEV ++ mktemp + local LAST_ERR=/tmp/tmp.czRVcLQwq8 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=delete namespace expose-sharded-10972 namespace "gmp-public" deleted namespace "gmp-system" deleted + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.xWQ2O5EhEV + cat /tmp/tmp.czRVcLQwq8 + rm /tmp/tmp.xWQ2O5EhEV /tmp/tmp.czRVcLQwq8 + return 0 + desc 'create namespace expose-sharded-10972' + set +o xtrace ----------------------------------------------------------------------------------- create namespace expose-sharded-10972 ----------------------------------------------------------------------------------- + kubectl_bin create namespace expose-sharded-10972 ++ mktemp + local LAST_OUT=/tmp/tmp.0G0UrqHyXr ++ mktemp + local LAST_ERR=/tmp/tmp.DihrZRaAPk + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl create namespace expose-sharded-10972 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.0G0UrqHyXr namespace/expose-sharded-10972 created + cat /tmp/tmp.DihrZRaAPk + rm /tmp/tmp.0G0UrqHyXr /tmp/tmp.DihrZRaAPk + return 0 ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.aADJeWl8Ea +++ mktemp ++ local LAST_ERR=/tmp/tmp.tySEVRy3TR ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.aADJeWl8Ea ++ cat /tmp/tmp.tySEVRy3TR ++ rm /tmp/tmp.aADJeWl8Ea /tmp/tmp.tySEVRy3TR ++ return 0 + kubectl_bin config set-context 
gke_cloud-dev-112233_us-central1-a_jen-psmdb-1612-57a92fde-7-cluster4 --namespace=expose-sharded-10972 ++ mktemp + local LAST_OUT=/tmp/tmp.2BHdty6dJ9 ++ mktemp + local LAST_ERR=/tmp/tmp.V52CHNAF9L + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-1612-57a92fde-7-cluster4 --namespace=expose-sharded-10972 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.2BHdty6dJ9 Context "gke_cloud-dev-112233_us-central1-a_jen-psmdb-1612-57a92fde-7-cluster4" modified. + cat /tmp/tmp.V52CHNAF9L + rm /tmp/tmp.2BHdty6dJ9 /tmp/tmp.V52CHNAF9L + return 0 + desc 'create first PSMDB cluster' + set +o xtrace ----------------------------------------------------------------------------------- create first PSMDB cluster ----------------------------------------------------------------------------------- + cluster=some-name + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1612/e2e-tests/conf/secrets.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1612/e2e-tests/conf/client.yml ++ mktemp + local LAST_OUT=/tmp/tmp.ZfBsyGzgLZ ++ mktemp + local LAST_ERR=/tmp/tmp.JvwkHsw4Hw + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1612/e2e-tests/conf/secrets.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1612/e2e-tests/conf/client.yml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.ZfBsyGzgLZ secret/some-users created deployment.apps/psmdb-client created + cat /tmp/tmp.JvwkHsw4Hw + rm /tmp/tmp.ZfBsyGzgLZ /tmp/tmp.JvwkHsw4Hw + return 0 + apply_s3_storage_secrets + desc 'create secrets for cloud storages' + set +o xtrace ----------------------------------------------------------------------------------- create secrets for cloud storages ----------------------------------------------------------------------------------- + '[' -z '' ']' + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1612/e2e-tests/conf/minio-secret.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1612/e2e-tests/conf/cloud-secret.yml ++ mktemp + local LAST_OUT=/tmp/tmp.qoISfsHMDk ++ mktemp + local LAST_ERR=/tmp/tmp.fBZ4vvPlH6 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1612/e2e-tests/conf/minio-secret.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1612/e2e-tests/conf/cloud-secret.yml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.qoISfsHMDk secret/minio-secret created secret/aws-s3-secret created secret/gcp-cs-secret created secret/azure-secret created + cat /tmp/tmp.fBZ4vvPlH6 + rm /tmp/tmp.qoISfsHMDk /tmp/tmp.fBZ4vvPlH6 + return 0 + version_gt 1.19 ++ bc -l ++ echo '1.27 >= 1.19' + '[' 1 -eq 1 ']' + return 0 + '[' 0 -ne 1 ']' + kubectl_bin apply -f - + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1612/e2e-tests/conf/container-rc.yaml + /usr/bin/sed s/docker/runc/g ++ mktemp + local LAST_OUT=/tmp/tmp.XVrzoXbLgU ++ mktemp + local LAST_ERR=/tmp/tmp.ulmx9bkWQI + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.XVrzoXbLgU runtimeclass.node.k8s.io/container-rc unchanged + cat /tmp/tmp.ulmx9bkWQI + rm /tmp/tmp.XVrzoXbLgU /tmp/tmp.ulmx9bkWQI + return 0 + apply_cluster 
/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1612/e2e-tests/expose-sharded/conf/some-name-rs0.yml + '[' -z '' ']' + cat_config /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1612/e2e-tests/expose-sharded/conf/some-name-rs0.yml + kubectl_bin apply -f - + yq eval '(.spec | select(.image == null)).image = "perconalab/percona-server-mongodb-operator:main-mongod7.0"' ++ mktemp + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1612/e2e-tests/expose-sharded/conf/some-name-rs0.yml + local LAST_OUT=/tmp/tmp.uu3J3b1QDB + yq eval '(.spec | select(has("pmm"))).pmm.image = "perconalab/pmm-client:dev-latest"' ++ mktemp + yq eval '(.spec | select(has("initImage"))).initImage = "perconalab/percona-server-mongodb-operator:PR-1612-57a92fde"' + local LAST_ERR=/tmp/tmp.MY8mCUqn6O + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + yq eval '(.spec | select(has("backup"))).backup.image = "perconalab/percona-server-mongodb-operator:main-backup"' + yq eval '.spec.upgradeOptions.apply="Never"' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.uu3J3b1QDB perconaservermongodb.psmdb.percona.com/some-name created + cat /tmp/tmp.MY8mCUqn6O + rm /tmp/tmp.uu3J3b1QDB /tmp/tmp.MY8mCUqn6O + return 0 + desc 'check if all 3 Pods started' + set +o xtrace ----------------------------------------------------------------------------------- check if all 3 Pods started ----------------------------------------------------------------------------------- + wait_for_running some-name-rs0 3 + local name=some-name-rs0 + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=some-name ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + [[ 0 -eq 2 ]] + wait_pod some-name-rs0-0 + local pod=some-name-rs0-0 + set +o xtrace waiting for pod/some-name-rs0-0 to be ready..................OK + for i in '$(seq 0 $last_pod)' + [[ 1 -eq 2 ]] + wait_pod some-name-rs0-1 + local pod=some-name-rs0-1 + set +o xtrace waiting for pod/some-name-rs0-1 to be ready...................OK + for i in '$(seq 0 $last_pod)' + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ydQb9zq714 +++ mktemp ++ local LAST_ERR=/tmp/tmp.ObGRPPhgPy ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.ydQb9zq714 ++ cat /tmp/tmp.ObGRPPhgPy ++ rm /tmp/tmp.ydQb9zq714 /tmp/tmp.ObGRPPhgPy ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-rs0-2 + local pod=some-name-rs0-2 + set +o xtrace waiting for pod/some-name-rs0-2 to be ready..........OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.2DgySygGHN +++ mktemp ++ local LAST_ERR=/tmp/tmp.NO3qTeUmmu ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.2DgySygGHN ++ cat /tmp/tmp.NO3qTeUmmu ++ rm /tmp/tmp.2DgySygGHN /tmp/tmp.NO3qTeUmmu ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness.......... 
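Note: every kubectl invocation in this trace goes through the test suite's retry wrapper, which is why each call is surrounded by mktemp/LAST_OUT/LAST_ERR bookkeeping and a 'seq 0 2' loop. A minimal sketch of that wrapper, reconstructed from the xtrace output above (the name kubectl_bin and the three-attempt 0/4/8-second backoff are read off the trace; the real helper in the suite's function library may differ in detail):

kubectl_bin() {
    local LAST_OUT LAST_ERR exit_status=0 timeout=4
    LAST_OUT=$(mktemp); LAST_ERR=$(mktemp)
    for i in $(seq 0 2); do                  # up to three attempts, as in the trace
        set +e
        kubectl "$@" >"$LAST_OUT" 2>"$LAST_ERR"
        exit_status=$?
        set -e
        [ "$exit_status" -eq 0 ] && break
        sleep $((i * timeout))               # 0s, 4s, 8s between attempts
    done
    cat "$LAST_OUT"                          # replay captured stdout/stderr for the log
    cat "$LAST_ERR" >&2
    rm -f "$LAST_OUT" "$LAST_ERR"
    return $exit_status
}

The failed 'kubectl get crd/null' lookups earlier in the log show the full retry cycle (sleep 0, sleep 4, sleep 8) being exhausted before the wrapper returns 1.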
+ wait_for_running some-name-cfg 3 false + local name=some-name-cfg + let last_pod=2 + local check_cluster_readyness=false + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=cfg + local cluster_name=some-name ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + [[ 0 -eq 2 ]] + wait_pod some-name-cfg-0 + local pod=some-name-cfg-0 + set +o xtrace waiting for pod/some-name-cfg-0 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 1 -eq 2 ]] + wait_pod some-name-cfg-1 + local pod=some-name-cfg-1 + set +o xtrace waiting for pod/some-name-cfg-1 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.AcNFBj9Y8l +++ mktemp ++ local LAST_ERR=/tmp/tmp.S8eDP3gKJo ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.AcNFBj9Y8l ++ cat /tmp/tmp.S8eDP3gKJo ++ rm /tmp/tmp.AcNFBj9Y8l /tmp/tmp.S8eDP3gKJo ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-cfg-2 + local pod=some-name-cfg-2 + set +o xtrace waiting for pod/some-name-cfg-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].non_voting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.wU53bXcG2f +++ mktemp ++ local LAST_ERR=/tmp/tmp.5gDUnhIiMW ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].non_voting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.wU53bXcG2f ++ cat /tmp/tmp.5gDUnhIiMW ++ rm /tmp/tmp.wU53bXcG2f /tmp/tmp.5gDUnhIiMW ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ false == \t\r\u\e ]] + wait_for_running some-name-mongos 3 + local name=some-name-mongos + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=mongos + local cluster_name=some-name ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + [[ 0 -eq 2 ]] + wait_pod some-name-mongos-0 + local pod=some-name-mongos-0 + set +o xtrace waiting for pod/some-name-mongos-0 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 1 -eq 2 ]] + wait_pod some-name-mongos-1 + local pod=some-name-mongos-1 + set +o xtrace waiting for pod/some-name-mongos-1 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.kM5UfV83sM +++ mktemp ++ local LAST_ERR=/tmp/tmp.HEkKIrgOTp ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.kM5UfV83sM ++ cat /tmp/tmp.HEkKIrgOTp ++ rm /tmp/tmp.kM5UfV83sM /tmp/tmp.HEkKIrgOTp ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-mongos-2 + local pod=some-name-mongos-2 + set +o xtrace waiting for pod/some-name-mongos-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].non_voting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.IQhnEbkhyf +++ mktemp ++ local LAST_ERR=/tmp/tmp.Mo8Syv33lT ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 
2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].non_voting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.IQhnEbkhyf ++ cat /tmp/tmp.Mo8Syv33lT ++ rm /tmp/tmp.IQhnEbkhyf /tmp/tmp.Mo8Syv33lT ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness + wait_cluster_consistency some-name + local cluster_name=some-name + local wait_time=32 + retry=0 + sleep 7 + echo -n 'waiting for cluster readyness' waiting for cluster readyness++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ZQZapj6OpN +++ mktemp ++ local LAST_ERR=/tmp/tmp.GY4EVi9Z25 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.ZQZapj6OpN ++ cat /tmp/tmp.GY4EVi9Z25 ++ rm /tmp/tmp.ZQZapj6OpN /tmp/tmp.GY4EVi9Z25 ++ return 0 + [[ ready == \r\e\a\d\y ]] + echo + desc 'check if service and statefulset created with expected config' + set +o xtrace ----------------------------------------------------------------------------------- check if service and statefulset created with expected config ----------------------------------------------------------------------------------- + compare_kubectl statefulset/some-name-rs0 + local resource=statefulset/some-name-rs0 + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1612/e2e-tests/expose-sharded/compare/statefulset_some-name-rs0.yml + local new_result=/tmp/tmp.azxQG9D5DO/statefulset_some-name-rs0.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1612/e2e-tests/expose-sharded/compare/statefulset_some-name-rs0-oc.yml ']' + kubectl_bin get -o yaml statefulset/some-name-rs0 ++ mktemp + local LAST_OUT=/tmp/tmp.lL3fMxnm7s ++ mktemp + local LAST_ERR=/tmp/tmp.XI7Ne47Y14 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get -o yaml statefulset/some-name-rs0 + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. 
| select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("expose-sharded-10972", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. == "batch/v1beta1")) = "batch/v1" ' - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.lL3fMxnm7s + cat /tmp/tmp.XI7Ne47Y14 + rm /tmp/tmp.lL3fMxnm7s /tmp/tmp.XI7Ne47Y14 + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.azxQG9D5DO/statefulset_some-name-rs0.yml + version_gt 1.22 ++ echo '1.27 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.azxQG9D5DO/statefulset_some-name-rs0.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.azxQG9D5DO/statefulset_some-name-rs0.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1612/e2e-tests/expose-sharded/compare/statefulset_some-name-rs0.yml == */cronjob* ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1612/e2e-tests/expose-sharded/compare/statefulset_some-name-rs0.yml /tmp/tmp.azxQG9D5DO/statefulset_some-name-rs0.yml + compare_kubectl statefulset/some-name-cfg + local resource=statefulset/some-name-cfg + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1612/e2e-tests/expose-sharded/compare/statefulset_some-name-cfg.yml + local new_result=/tmp/tmp.azxQG9D5DO/statefulset_some-name-cfg.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1612/e2e-tests/expose-sharded/compare/statefulset_some-name-cfg-oc.yml ']' + kubectl_bin get -o yaml statefulset/some-name-cfg + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. 
| select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("expose-sharded-10972", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. == "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.IBxM7ZuFHH ++ mktemp + local LAST_ERR=/tmp/tmp.KuEkEEnvGc + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get -o yaml statefulset/some-name-cfg + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.IBxM7ZuFHH + cat /tmp/tmp.KuEkEEnvGc + rm /tmp/tmp.IBxM7ZuFHH /tmp/tmp.KuEkEEnvGc + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.azxQG9D5DO/statefulset_some-name-cfg.yml + version_gt 1.22 ++ echo '1.27 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.azxQG9D5DO/statefulset_some-name-cfg.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.azxQG9D5DO/statefulset_some-name-cfg.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1612/e2e-tests/expose-sharded/compare/statefulset_some-name-cfg.yml == */cronjob* ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1612/e2e-tests/expose-sharded/compare/statefulset_some-name-cfg.yml /tmp/tmp.azxQG9D5DO/statefulset_some-name-cfg.yml + compare_kubectl statefulset/some-name-mongos '' + local resource=statefulset/some-name-mongos + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1612/e2e-tests/expose-sharded/compare/statefulset_some-name-mongos.yml + local new_result=/tmp/tmp.azxQG9D5DO/statefulset_some-name-mongos.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1612/e2e-tests/expose-sharded/compare/statefulset_some-name-mongos-oc.yml ']' + kubectl_bin get -o yaml statefulset/some-name-mongos ++ mktemp + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. 
| select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("expose-sharded-10972", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. == "batch/v1beta1")) = "batch/v1" ' - + local LAST_OUT=/tmp/tmp.TvON8VYEEc ++ mktemp + local LAST_ERR=/tmp/tmp.oDNsuw1Nu2 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get -o yaml statefulset/some-name-mongos + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.TvON8VYEEc + cat /tmp/tmp.oDNsuw1Nu2 + rm /tmp/tmp.TvON8VYEEc /tmp/tmp.oDNsuw1Nu2 + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.azxQG9D5DO/statefulset_some-name-mongos.yml + version_gt 1.22 ++ echo '1.27 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.azxQG9D5DO/statefulset_some-name-mongos.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.azxQG9D5DO/statefulset_some-name-mongos.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1612/e2e-tests/expose-sharded/compare/statefulset_some-name-mongos.yml == */cronjob* ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1612/e2e-tests/expose-sharded/compare/statefulset_some-name-mongos.yml /tmp/tmp.azxQG9D5DO/statefulset_some-name-mongos.yml + desc 'write data, read from all' + set +o xtrace ----------------------------------------------------------------------------------- write data, read from all ----------------------------------------------------------------------------------- + run_mongos 'db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})' userAdmin:userAdmin123456@some-name-mongos.expose-sharded-10972 + local 'command=db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})' + local uri=userAdmin:userAdmin123456@some-name-mongos.expose-sharded-10972 + local driver=mongodb + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.xkKyu9QqjH +++ mktemp ++ local LAST_ERR=/tmp/tmp.HJnpA5zW9E ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.xkKyu9QqjH ++ cat /tmp/tmp.HJnpA5zW9E ++ rm /tmp/tmp.xkKyu9QqjH /tmp/tmp.HJnpA5zW9E ++ 
return 0 + local client_container=psmdb-client-6c585f8dbd-dg6pk + local mongo_flag= + kubectl_bin exec psmdb-client-6c585f8dbd-dg6pk -- bash -c 'printf '\''db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})\n'\'' | mongo mongodb://userAdmin:userAdmin123456@some-name-mongos.expose-sharded-10972.svc.cluster.local/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.rjw2liDeF4 ++ mktemp + local LAST_ERR=/tmp/tmp.HrNXnCI88L + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-6c585f8dbd-dg6pk -- bash -c 'printf '\''db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})\n'\'' | mongo mongodb://userAdmin:userAdmin123456@some-name-mongos.expose-sharded-10972.svc.cluster.local/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.rjw2liDeF4 Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-mongos.expose-sharded-10972.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb Implicit session: session { "id" : UUID("ab265473-5d90-4ac3-96aa-655cf9ab4f54") } Percona Server for MongoDB server version: v7.0.12-7 WARNING: shell and server versions do not match Successfully added user: { "user" : "myApp", "roles" : [ { "db" : "myApp", "role" : "readWrite" } ] } bye + cat /tmp/tmp.HrNXnCI88L + rm /tmp/tmp.rjw2liDeF4 /tmp/tmp.HrNXnCI88L + return 0 + run_mongo 'db.createUser({user: "myApp", pwd: "myPass", roles: [{ db: "myApp", role: "readWrite" }]})' userAdmin:userAdmin123456@some-name-rs0-0.some-name-rs0.expose-sharded-10972 mongodb + local 'command=db.createUser({user: "myApp", pwd: "myPass", roles: [{ db: "myApp", role: "readWrite" }]})' + local uri=userAdmin:userAdmin123456@some-name-rs0-0.some-name-rs0.expose-sharded-10972 + local driver=mongodb + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.88d3gUmx0A +++ mktemp ++ local LAST_ERR=/tmp/tmp.XiN8uMcyid ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.88d3gUmx0A ++ cat /tmp/tmp.XiN8uMcyid ++ rm /tmp/tmp.88d3gUmx0A /tmp/tmp.XiN8uMcyid ++ return 0 + local client_container=psmdb-client-6c585f8dbd-dg6pk + local mongo_flag= + [[ userAdmin:userAdmin123456@some-name-rs0-0.some-name-rs0.expose-sharded-10972 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-6c585f8dbd-dg6pk -- bash -c 'printf '\''db.createUser({user: "myApp", pwd: "myPass", roles: [{ db: "myApp", role: "readWrite" }]})\n'\'' | mongo mongodb://userAdmin:userAdmin123456@some-name-rs0-0.some-name-rs0.expose-sharded-10972.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.t20YjNSVI1 ++ mktemp + local LAST_ERR=/tmp/tmp.u52x2YKRH6 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-6c585f8dbd-dg6pk -- bash -c 'printf '\''db.createUser({user: "myApp", pwd: "myPass", roles: [{ db: "myApp", role: "readWrite" }]})\n'\'' | mongo mongodb://userAdmin:userAdmin123456@some-name-rs0-0.some-name-rs0.expose-sharded-10972.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.t20YjNSVI1 Percona Server for MongoDB shell version 
v4.4.29-28 connecting to: mongodb://some-name-rs0-0.some-name-rs0.expose-sharded-10972.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("ccd3df19-5f0c-436e-bdba-b3056fb24efb") } Percona Server for MongoDB server version: v7.0.12-7 WARNING: shell and server versions do not match Successfully added user: { "user" : "myApp", "roles" : [ { "db" : "myApp", "role" : "readWrite" } ] } bye + cat /tmp/tmp.u52x2YKRH6 + rm /tmp/tmp.t20YjNSVI1 /tmp/tmp.u52x2YKRH6 + return 0 + run_mongos 'sh.enableSharding("myApp","rs0")' clusterAdmin:clusterAdmin123456@some-name-mongos.expose-sharded-10972 + local 'command=sh.enableSharding("myApp","rs0")' + local uri=clusterAdmin:clusterAdmin123456@some-name-mongos.expose-sharded-10972 + local driver=mongodb + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.qHTut4rcNh +++ mktemp ++ local LAST_ERR=/tmp/tmp.2HjPNEpUQd ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.qHTut4rcNh ++ cat /tmp/tmp.2HjPNEpUQd ++ rm /tmp/tmp.qHTut4rcNh /tmp/tmp.2HjPNEpUQd ++ return 0 + local client_container=psmdb-client-6c585f8dbd-dg6pk + local mongo_flag= + kubectl_bin exec psmdb-client-6c585f8dbd-dg6pk -- bash -c 'printf '\''sh.enableSharding("myApp","rs0")\n'\'' | mongo mongodb://clusterAdmin:clusterAdmin123456@some-name-mongos.expose-sharded-10972.svc.cluster.local/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.ypqxMqijbu ++ mktemp + local LAST_ERR=/tmp/tmp.nyY9oSv9Iz + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-6c585f8dbd-dg6pk -- bash -c 'printf '\''sh.enableSharding("myApp","rs0")\n'\'' | mongo mongodb://clusterAdmin:clusterAdmin123456@some-name-mongos.expose-sharded-10972.svc.cluster.local/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.ypqxMqijbu Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-mongos.expose-sharded-10972.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb Implicit session: session { "id" : UUID("486d2343-5b7e-49e3-95e7-dfb519a6de8b") } Percona Server for MongoDB server version: v7.0.12-7 WARNING: shell and server versions do not match { "ok" : 1, "$clusterTime" : { "clusterTime" : Timestamp(1722439458, 8), "signature" : { "hash" : BinData(0,"67fSEKEghDgkc48LhgTUnTRKcFg="), "keyId" : NumberLong("7397820608874020887") } }, "operationTime" : Timestamp(1722439458, 2) } bye + cat /tmp/tmp.nyY9oSv9Iz + rm /tmp/tmp.ypqxMqijbu /tmp/tmp.nyY9oSv9Iz + return 0 + run_mongos 'use myApp\n db.test.insert({ x: 100500 })' myApp:myPass@some-name-mongos.expose-sharded-10972 + local 'command=use myApp\n db.test.insert({ x: 100500 })' + local uri=myApp:myPass@some-name-mongos.expose-sharded-10972 + local driver=mongodb + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ePhb2NCtLb +++ mktemp ++ local LAST_ERR=/tmp/tmp.sDikVgZbfG ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ 
exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.ePhb2NCtLb ++ cat /tmp/tmp.sDikVgZbfG ++ rm /tmp/tmp.ePhb2NCtLb /tmp/tmp.sDikVgZbfG ++ return 0 + local client_container=psmdb-client-6c585f8dbd-dg6pk + local mongo_flag= + kubectl_bin exec psmdb-client-6c585f8dbd-dg6pk -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.expose-sharded-10972.svc.cluster.local/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.3evh1D90uy ++ mktemp + local LAST_ERR=/tmp/tmp.6gRiD17ZMP + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-6c585f8dbd-dg6pk -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.expose-sharded-10972.svc.cluster.local/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.3evh1D90uy Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-mongos.expose-sharded-10972.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb Implicit session: session { "id" : UUID("550f2742-71b2-4925-9bc5-4e07b9dc30ff") } Percona Server for MongoDB server version: v7.0.12-7 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.6gRiD17ZMP + rm /tmp/tmp.3evh1D90uy /tmp/tmp.6gRiD17ZMP + return 0 + compare_mongos_cmd find myApp:myPass@some-name-mongos.expose-sharded-10972 + local command=find + local uri=myApp:myPass@some-name-mongos.expose-sharded-10972 + local postfix= + local suffix= + local database=myApp + local collection=test + run_mongos 'use myApp\n db.test.find()' myApp:myPass@some-name-mongos.expose-sharded-10972 mongodb '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-mongos.expose-sharded-10972 + local driver=mongodb + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' + egrep -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' +++ mktemp + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ local LAST_OUT=/tmp/tmp.3GI97o3Rzo +++ mktemp ++ local LAST_ERR=/tmp/tmp.ncmMPYDCCh ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.3GI97o3Rzo ++ cat /tmp/tmp.ncmMPYDCCh ++ rm /tmp/tmp.3GI97o3Rzo /tmp/tmp.ncmMPYDCCh ++ return 0 + local client_container=psmdb-client-6c585f8dbd-dg6pk + local mongo_flag= + kubectl_bin exec psmdb-client-6c585f8dbd-dg6pk -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.expose-sharded-10972.svc.cluster.local/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.kCGqElirjY ++ mktemp + local LAST_ERR=/tmp/tmp.xVY981ofhi + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-6c585f8dbd-dg6pk -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.expose-sharded-10972.svc.cluster.local/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.kCGqElirjY + cat /tmp/tmp.xVY981ofhi + rm 
/tmp/tmp.kCGqElirjY /tmp/tmp.xVY981ofhi + return 0 + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1612/e2e-tests/expose-sharded/compare/find.json /tmp/tmp.azxQG9D5DO/find + desc 'Unexposed -> Exposed, ClusterIP' + set +o xtrace ----------------------------------------------------------------------------------- Unexposed -> Exposed, ClusterIP ----------------------------------------------------------------------------------- + expose_cluster ClusterIP + expose_type=ClusterIP + expose_status=true + kubectl_bin patch psmdb some-name --type=json --patch '[ { "op": "replace", "path": "/spec/replsets/0/expose", "value": { "enabled": true, "exposeType" : "ClusterIP" } }, { "op": "replace", "path": "/spec/sharding/mongos/expose", "value": { "exposeType" : "ClusterIP" } }, { "op": "replace", "path": "/spec/sharding/configsvrReplSet/expose", "value": { "enabled": true, "exposeType" : "ClusterIP" } }]' ++ mktemp + local LAST_OUT=/tmp/tmp.AACjQ4fJXa ++ mktemp + local LAST_ERR=/tmp/tmp.xEJqlPG5wk + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl patch psmdb some-name --type=json --patch '[ { "op": "replace", "path": "/spec/replsets/0/expose", "value": { "enabled": true, "exposeType" : "ClusterIP" } }, { "op": "replace", "path": "/spec/sharding/mongos/expose", "value": { "exposeType" : "ClusterIP" } }, { "op": "replace", "path": "/spec/sharding/configsvrReplSet/expose", "value": { "enabled": true, "exposeType" : "ClusterIP" } }]' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.AACjQ4fJXa perconaservermongodb.psmdb.percona.com/some-name patched + cat /tmp/tmp.xEJqlPG5wk + rm /tmp/tmp.AACjQ4fJXa /tmp/tmp.xEJqlPG5wk + return 0 + wait_for_running some-name-rs0 3 + local name=some-name-rs0 + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=some-name ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + [[ 0 -eq 2 ]] + wait_pod some-name-rs0-0 + local pod=some-name-rs0-0 + set +o xtrace waiting for pod/some-name-rs0-0 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 1 -eq 2 ]] + wait_pod some-name-rs0-1 + local pod=some-name-rs0-1 + set +o xtrace waiting for pod/some-name-rs0-1 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.vjSvADuNw8 +++ mktemp ++ local LAST_ERR=/tmp/tmp.s7BOV2FtgF ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.vjSvADuNw8 ++ cat /tmp/tmp.s7BOV2FtgF ++ rm /tmp/tmp.vjSvADuNw8 /tmp/tmp.s7BOV2FtgF ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-rs0-2 + local pod=some-name-rs0-2 + set +o xtrace waiting for pod/some-name-rs0-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ck7QTwLV3f +++ mktemp ++ local LAST_ERR=/tmp/tmp.bDztsjZQ7B ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.ck7QTwLV3f ++ cat /tmp/tmp.bDztsjZQ7B ++ rm /tmp/tmp.ck7QTwLV3f 
/tmp/tmp.bDztsjZQ7B ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness + wait_for_running some-name-cfg 3 false + local name=some-name-cfg + let last_pod=2 + local check_cluster_readyness=false + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=cfg + local cluster_name=some-name ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + [[ 0 -eq 2 ]] + wait_pod some-name-cfg-0 + local pod=some-name-cfg-0 + set +o xtrace waiting for pod/some-name-cfg-0 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 1 -eq 2 ]] + wait_pod some-name-cfg-1 + local pod=some-name-cfg-1 + set +o xtrace waiting for pod/some-name-cfg-1 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.EmHGQD6bZ1 +++ mktemp ++ local LAST_ERR=/tmp/tmp.tdrmrjM3zK ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.EmHGQD6bZ1 ++ cat /tmp/tmp.tdrmrjM3zK ++ rm /tmp/tmp.EmHGQD6bZ1 /tmp/tmp.tdrmrjM3zK ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-cfg-2 + local pod=some-name-cfg-2 + set +o xtrace waiting for pod/some-name-cfg-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].non_voting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.sFXe7K2z22 +++ mktemp ++ local LAST_ERR=/tmp/tmp.DcpUnoqXGz ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].non_voting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.sFXe7K2z22 ++ cat /tmp/tmp.DcpUnoqXGz ++ rm /tmp/tmp.sFXe7K2z22 /tmp/tmp.DcpUnoqXGz ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ false == \t\r\u\e ]] + wait_for_running some-name-mongos 3 + local name=some-name-mongos + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=mongos + local cluster_name=some-name ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + [[ 0 -eq 2 ]] + wait_pod some-name-mongos-0 + local pod=some-name-mongos-0 + set +o xtrace waiting for pod/some-name-mongos-0 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 1 -eq 2 ]] + wait_pod some-name-mongos-1 + local pod=some-name-mongos-1 + set +o xtrace waiting for pod/some-name-mongos-1 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.PLokpd2Myv +++ mktemp ++ local LAST_ERR=/tmp/tmp.6xCxERVNf4 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.PLokpd2Myv ++ cat /tmp/tmp.6xCxERVNf4 ++ rm /tmp/tmp.PLokpd2Myv /tmp/tmp.6xCxERVNf4 ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-mongos-2 + local pod=some-name-mongos-2 + set +o xtrace waiting for pod/some-name-mongos-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].non_voting.enabled}' +++ mktemp ++ local 
LAST_OUT=/tmp/tmp.SxKm4XqOvm +++ mktemp ++ local LAST_ERR=/tmp/tmp.fCfFGJ1YVY ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].non_voting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.SxKm4XqOvm ++ cat /tmp/tmp.fCfFGJ1YVY ++ rm /tmp/tmp.SxKm4XqOvm /tmp/tmp.fCfFGJ1YVY ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness + wait_cluster_consistency some-name + local cluster_name=some-name + local wait_time=32 + retry=0 + sleep 7 + echo -n 'waiting for cluster readyness' waiting for cluster readyness++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.01NoxW99I6 +++ mktemp ++ local LAST_ERR=/tmp/tmp.hkjNqUjFgG ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.01NoxW99I6 ++ cat /tmp/tmp.hkjNqUjFgG ++ rm /tmp/tmp.01NoxW99I6 /tmp/tmp.hkjNqUjFgG ++ return 0 + [[ ready == \r\e\a\d\y ]] + echo + run_mongos 'use myApp\n db.test.insert({ x: 100501 })' myApp:myPass@some-name-mongos.expose-sharded-10972 + local 'command=use myApp\n db.test.insert({ x: 100501 })' + local uri=myApp:myPass@some-name-mongos.expose-sharded-10972 + local driver=mongodb + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.F6BxKuOwnA +++ mktemp ++ local LAST_ERR=/tmp/tmp.HOYbNNOToh ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.F6BxKuOwnA ++ cat /tmp/tmp.HOYbNNOToh ++ rm /tmp/tmp.F6BxKuOwnA /tmp/tmp.HOYbNNOToh ++ return 0 + local client_container=psmdb-client-6c585f8dbd-dg6pk + local mongo_flag= + kubectl_bin exec psmdb-client-6c585f8dbd-dg6pk -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100501 })\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.expose-sharded-10972.svc.cluster.local/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.5swKnLjBo3 ++ mktemp + local LAST_ERR=/tmp/tmp.tWZfEdPTM8 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-6c585f8dbd-dg6pk -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100501 })\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.expose-sharded-10972.svc.cluster.local/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.5swKnLjBo3 Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-mongos.expose-sharded-10972.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb Implicit session: session { "id" : UUID("3b5d5ca5-56d0-43a4-b7e0-d092c6b20e72") } Percona Server for MongoDB server version: v7.0.12-7 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.tWZfEdPTM8 + rm /tmp/tmp.5swKnLjBo3 /tmp/tmp.tWZfEdPTM8 + return 0 + compare_mongos_cmd find myApp:myPass@some-name-mongos.expose-sharded-10972 -2nd + local command=find + local uri=myApp:myPass@some-name-mongos.expose-sharded-10972 + local postfix=-2nd + local 
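
For reference, the compare_mongos_cmd/compare_mongo_cmd steps interleaved through this trace make the shell output diffable against static fixtures by stripping everything non-deterministic before the diff: banners and session noise via egrep -v, then ObjectIds and the random namespace ordinal via sed. Both expressions are copied from the trace:

  egrep -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' \
    | /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/'

so a run in namespace expose-sharded-10972 and a run in any other expose-sharded-* namespace normalize to the same golden file (e.g. find-2nd.json).
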
suffix= + local database=myApp + local collection=test + run_mongos 'use myApp\n db.test.find()' myApp:myPass@some-name-mongos.expose-sharded-10972 mongodb '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-mongos.expose-sharded-10972 + egrep -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + local driver=mongodb + local suffix=.svc.cluster.local + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.EXOUinaYYR +++ mktemp ++ local LAST_ERR=/tmp/tmp.VhrD0apfI9 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.EXOUinaYYR ++ cat /tmp/tmp.VhrD0apfI9 ++ rm /tmp/tmp.EXOUinaYYR /tmp/tmp.VhrD0apfI9 ++ return 0 + local client_container=psmdb-client-6c585f8dbd-dg6pk + local mongo_flag= + kubectl_bin exec psmdb-client-6c585f8dbd-dg6pk -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.expose-sharded-10972.svc.cluster.local/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.TKqHKsBJgY ++ mktemp + local LAST_ERR=/tmp/tmp.MKuwvol3ub + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-6c585f8dbd-dg6pk -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.expose-sharded-10972.svc.cluster.local/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.TKqHKsBJgY + cat /tmp/tmp.MKuwvol3ub + rm /tmp/tmp.TKqHKsBJgY /tmp/tmp.MKuwvol3ub + return 0 + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1612/e2e-tests/expose-sharded/compare/find-2nd.json /tmp/tmp.azxQG9D5DO/find-2nd + compare_mongo_cmd find myApp:myPass@some-name-rs0-0.some-name-rs0.expose-sharded-10972 -2nd + local command=find + local uri=myApp:myPass@some-name-rs0-0.some-name-rs0.expose-sharded-10972 + local postfix=-2nd + local suffix= + local database=myApp + local collection=test + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-0.some-name-rs0.expose-sharded-10972 mongodb '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-0.some-name-rs0.expose-sharded-10972 + local driver=mongodb + local suffix=.svc.cluster.local + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.hSDu5KyMUK +++ mktemp ++ local LAST_ERR=/tmp/tmp.tG0Dmio5zV ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.hSDu5KyMUK ++ cat /tmp/tmp.tG0Dmio5zV ++ rm /tmp/tmp.hSDu5KyMUK /tmp/tmp.tG0Dmio5zV ++ return 0 + local client_container=psmdb-client-6c585f8dbd-dg6pk + local 
mongo_flag= + [[ myApp:myPass@some-name-rs0-0.some-name-rs0.expose-sharded-10972 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-6c585f8dbd-dg6pk -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-0.some-name-rs0.expose-sharded-10972.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.qDgSexbA0w ++ mktemp + local LAST_ERR=/tmp/tmp.338dnV76NJ + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-6c585f8dbd-dg6pk -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-0.some-name-rs0.expose-sharded-10972.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.qDgSexbA0w + cat /tmp/tmp.338dnV76NJ + rm /tmp/tmp.qDgSexbA0w /tmp/tmp.338dnV76NJ + return 0 + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1612/e2e-tests/expose-sharded/compare/find-2nd.json /tmp/tmp.azxQG9D5DO/find-2nd + compare_mongo_cmd find myApp:myPass@some-name-rs0-1.some-name-rs0.expose-sharded-10972 -2nd + local command=find + local uri=myApp:myPass@some-name-rs0-1.some-name-rs0.expose-sharded-10972 + local postfix=-2nd + local suffix= + local database=myApp + local collection=test + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-1.some-name-rs0.expose-sharded-10972 mongodb '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-1.some-name-rs0.expose-sharded-10972 + local driver=mongodb + local suffix=.svc.cluster.local + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' +++ mktemp ++ local LAST_OUT=/tmp/tmp.x0ljP3hoN4 +++ mktemp ++ local LAST_ERR=/tmp/tmp.oyrFsXuO24 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.x0ljP3hoN4 ++ cat /tmp/tmp.oyrFsXuO24 ++ rm /tmp/tmp.x0ljP3hoN4 /tmp/tmp.oyrFsXuO24 ++ return 0 + local client_container=psmdb-client-6c585f8dbd-dg6pk + local mongo_flag= + [[ myApp:myPass@some-name-rs0-1.some-name-rs0.expose-sharded-10972 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-6c585f8dbd-dg6pk -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-1.some-name-rs0.expose-sharded-10972.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.wSZuW83o5M ++ mktemp + local LAST_ERR=/tmp/tmp.gGbXQgCnsw + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-6c585f8dbd-dg6pk -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-1.some-name-rs0.expose-sharded-10972.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.wSZuW83o5M + cat /tmp/tmp.gGbXQgCnsw + rm /tmp/tmp.wSZuW83o5M /tmp/tmp.gGbXQgCnsw + return 0 + diff 
/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1612/e2e-tests/expose-sharded/compare/find-2nd.json /tmp/tmp.azxQG9D5DO/find-2nd + compare_mongo_cmd find myApp:myPass@some-name-rs0-2.some-name-rs0.expose-sharded-10972 -2nd + local command=find + local uri=myApp:myPass@some-name-rs0-2.some-name-rs0.expose-sharded-10972 + local postfix=-2nd + local suffix= + local database=myApp + local collection=test + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-2.some-name-rs0.expose-sharded-10972 mongodb '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-2.some-name-rs0.expose-sharded-10972 + local driver=mongodb + local suffix=.svc.cluster.local + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.V0SEs8Jxtp +++ mktemp ++ local LAST_ERR=/tmp/tmp.M87VCvKn14 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.V0SEs8Jxtp ++ cat /tmp/tmp.M87VCvKn14 ++ rm /tmp/tmp.V0SEs8Jxtp /tmp/tmp.M87VCvKn14 ++ return 0 + local client_container=psmdb-client-6c585f8dbd-dg6pk + local mongo_flag= + [[ myApp:myPass@some-name-rs0-2.some-name-rs0.expose-sharded-10972 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-6c585f8dbd-dg6pk -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-2.some-name-rs0.expose-sharded-10972.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.5lmxC1x9Zt ++ mktemp + local LAST_ERR=/tmp/tmp.32RtOYPJwB + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-6c585f8dbd-dg6pk -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-2.some-name-rs0.expose-sharded-10972.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.5lmxC1x9Zt + cat /tmp/tmp.32RtOYPJwB + rm /tmp/tmp.5lmxC1x9Zt /tmp/tmp.32RtOYPJwB + return 0 + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1612/e2e-tests/expose-sharded/compare/find-2nd.json /tmp/tmp.azxQG9D5DO/find-2nd + compare_mongo_config some-name expose-sharded-10972 + cluster=some-name + namespace=expose-sharded-10972 + enable_expose=true + desc 'Compare mongo config' + set +o xtrace ----------------------------------------------------------------------------------- Compare mongo config ----------------------------------------------------------------------------------- + cfg_0_endpoint=some-name-cfg-0.some-name-cfg.expose-sharded-10972.svc.cluster.local ++ run_mongo 'var host;var x=0;rs.conf().members.forEach(function(d){ if(d.tags.podName=="some-name-cfg-0"){ host=rs.conf().members[x].host;print(host)};x=x+1; })' clusterAdmin:clusterAdmin123456@some-name-cfg.expose-sharded-10972 ++ local 'command=var host;var x=0;rs.conf().members.forEach(function(d){ if(d.tags.podName=="some-name-cfg-0"){ host=rs.conf().members[x].host;print(host)};x=x+1; })' ++ local 
uri=clusterAdmin:clusterAdmin123456@some-name-cfg.expose-sharded-10972 ++ local driver=mongodb+srv ++ local suffix=.svc.cluster.local +++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ egrep -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|bye' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.RQGsTlTAku ++++ mktemp +++ local LAST_ERR=/tmp/tmp.2U3Qz3vYo5 +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.RQGsTlTAku +++ cat /tmp/tmp.2U3Qz3vYo5 +++ rm /tmp/tmp.RQGsTlTAku /tmp/tmp.2U3Qz3vYo5 +++ return 0 ++ local client_container=psmdb-client-6c585f8dbd-dg6pk ++ local mongo_flag= ++ [[ clusterAdmin:clusterAdmin123456@some-name-cfg.expose-sharded-10972 == *cfg* ]] ++ replica_set=cfg ++ kubectl_bin exec psmdb-client-6c585f8dbd-dg6pk -- bash -c 'printf '\''var host;var x=0;rs.conf().members.forEach(function(d){ if(d.tags.podName=="some-name-cfg-0"){ host=rs.conf().members[x].host;print(host)};x=x+1; })\n'\'' | mongo mongodb+srv://clusterAdmin:clusterAdmin123456@some-name-cfg.expose-sharded-10972.svc.cluster.local/admin?ssl=false\&replicaSet=cfg ' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ybXVQkb6nO +++ mktemp ++ local LAST_ERR=/tmp/tmp.g4tb3vj3qe ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl exec psmdb-client-6c585f8dbd-dg6pk -- bash -c 'printf '\''var host;var x=0;rs.conf().members.forEach(function(d){ if(d.tags.podName=="some-name-cfg-0"){ host=rs.conf().members[x].host;print(host)};x=x+1; })\n'\'' | mongo mongodb+srv://clusterAdmin:clusterAdmin123456@some-name-cfg.expose-sharded-10972.svc.cluster.local/admin?ssl=false\&replicaSet=cfg ' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.ybXVQkb6nO ++ cat /tmp/tmp.g4tb3vj3qe ++ rm /tmp/tmp.ybXVQkb6nO /tmp/tmp.g4tb3vj3qe ++ return 0 + cfg_0_endpoint_actual=some-name-cfg-0.some-name-cfg.expose-sharded-10972.svc.cluster.local:27017 + rs0_0_endpoint=some-name-rs0-0.some-name-rs0.expose-sharded-10972.svc.cluster.local ++ run_mongo 'var host;var x=0;rs.conf().members.forEach(function(d){ if(d.tags.podName=="some-name-rs0-0"){ host=rs.conf().members[x].host;print(host)};x=x+1; })' clusterAdmin:clusterAdmin123456@some-name-rs0.expose-sharded-10972 ++ local 'command=var host;var x=0;rs.conf().members.forEach(function(d){ if(d.tags.podName=="some-name-rs0-0"){ host=rs.conf().members[x].host;print(host)};x=x+1; })' ++ local uri=clusterAdmin:clusterAdmin123456@some-name-rs0.expose-sharded-10972 ++ local driver=mongodb+srv ++ egrep -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|bye' ++ local suffix=.svc.cluster.local +++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.GaWyKvpyB5 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.NYi3mWXqdM +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ 
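
The compare_mongo_config step running here does not trust the CR status: it asks each replica set for its own view of itself. It connects directly to the replset (mongodb+srv, replicaSet=cfg or rs0) and extracts the rs.conf() host entry whose tags.podName matches the pod, using this JS one-liner (verbatim from the trace, wrapped for readability):

  var host; var x = 0;
  rs.conf().members.forEach(function(d) {
    if (d.tags.podName == "some-name-cfg-0") { host = rs.conf().members[x].host; print(host) };
    x = x + 1;
  })

The printed host is then string-compared against the expected per-pod service DNS name, e.g. some-name-cfg-0.some-name-cfg.expose-sharded-10972.svc.cluster.local:27017.
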
break +++ cat /tmp/tmp.GaWyKvpyB5 +++ cat /tmp/tmp.NYi3mWXqdM +++ rm /tmp/tmp.GaWyKvpyB5 /tmp/tmp.NYi3mWXqdM +++ return 0 ++ local client_container=psmdb-client-6c585f8dbd-dg6pk ++ local mongo_flag= ++ [[ clusterAdmin:clusterAdmin123456@some-name-rs0.expose-sharded-10972 == *cfg* ]] ++ replica_set=rs0 ++ kubectl_bin exec psmdb-client-6c585f8dbd-dg6pk -- bash -c 'printf '\''var host;var x=0;rs.conf().members.forEach(function(d){ if(d.tags.podName=="some-name-rs0-0"){ host=rs.conf().members[x].host;print(host)};x=x+1; })\n'\'' | mongo mongodb+srv://clusterAdmin:clusterAdmin123456@some-name-rs0.expose-sharded-10972.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' +++ mktemp ++ local LAST_OUT=/tmp/tmp.btUhryP3tl +++ mktemp ++ local LAST_ERR=/tmp/tmp.sNPFFHsfRg ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl exec psmdb-client-6c585f8dbd-dg6pk -- bash -c 'printf '\''var host;var x=0;rs.conf().members.forEach(function(d){ if(d.tags.podName=="some-name-rs0-0"){ host=rs.conf().members[x].host;print(host)};x=x+1; })\n'\'' | mongo mongodb+srv://clusterAdmin:clusterAdmin123456@some-name-rs0.expose-sharded-10972.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.btUhryP3tl ++ cat /tmp/tmp.sNPFFHsfRg ++ rm /tmp/tmp.btUhryP3tl /tmp/tmp.sNPFFHsfRg ++ return 0 + rs0_0_endpoint_actual=some-name-rs0-0.some-name-rs0.expose-sharded-10972.svc.cluster.local:27017 + [[ some-name-rs0-0.some-name-rs0.expose-sharded-10972.svc.cluster.local:27017 != \s\o\m\e\-\n\a\m\e\-\r\s\0\-\0\.\s\o\m\e\-\n\a\m\e\-\r\s\0\.\e\x\p\o\s\e\-\s\h\a\r\d\e\d\-\1\0\9\7\2\.\s\v\c\.\c\l\u\s\t\e\r\.\l\o\c\a\l\:\2\7\0\1\7 ]] + [[ some-name-cfg-0.some-name-cfg.expose-sharded-10972.svc.cluster.local:27017 != \s\o\m\e\-\n\a\m\e\-\c\f\g\-\0\.\s\o\m\e\-\n\a\m\e\-\c\f\g\.\e\x\p\o\s\e\-\s\h\a\r\d\e\d\-\1\0\9\7\2\.\s\v\c\.\c\l\u\s\t\e\r\.\l\o\c\a\l\:\2\7\0\1\7 ]] + desc 'Exposed, ClusterIP -> LoadBalancer' + set +o xtrace ----------------------------------------------------------------------------------- Exposed, ClusterIP -> LoadBalancer ----------------------------------------------------------------------------------- + expose_cluster LoadBalancer + expose_type=LoadBalancer + expose_status=true + kubectl_bin patch psmdb some-name --type=json --patch '[ { "op": "replace", "path": "/spec/replsets/0/expose", "value": { "enabled": true, "exposeType" : "LoadBalancer" } }, { "op": "replace", "path": "/spec/sharding/mongos/expose", "value": { "exposeType" : "LoadBalancer" } }, { "op": "replace", "path": "/spec/sharding/configsvrReplSet/expose", "value": { "enabled": true, "exposeType" : "LoadBalancer" } }]' ++ mktemp + local LAST_OUT=/tmp/tmp.TWDVttNbaW ++ mktemp + local LAST_ERR=/tmp/tmp.TqmHLWfvvT + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl patch psmdb some-name --type=json --patch '[ { "op": "replace", "path": "/spec/replsets/0/expose", "value": { "enabled": true, "exposeType" : "LoadBalancer" } }, { "op": "replace", "path": "/spec/sharding/mongos/expose", "value": { "exposeType" : "LoadBalancer" } }, { "op": "replace", "path": "/spec/sharding/configsvrReplSet/expose", "value": { "enabled": true, "exposeType" : "LoadBalancer" } }]' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.TWDVttNbaW perconaservermongodb.psmdb.percona.com/some-name patched + cat /tmp/tmp.TqmHLWfvvT + rm /tmp/tmp.TWDVttNbaW /tmp/tmp.TqmHLWfvvT + return 0 + 
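
The ClusterIP -> LoadBalancer transition just applied is a single JSON patch against the psmdb custom resource, identical in shape to the earlier ClusterIP one; only exposeType changes. Standalone, with values verbatim from the trace:

  kubectl patch psmdb some-name --type=json --patch '[
    { "op": "replace", "path": "/spec/replsets/0/expose", "value": { "enabled": true, "exposeType" : "LoadBalancer" } },
    { "op": "replace", "path": "/spec/sharding/mongos/expose", "value": { "exposeType" : "LoadBalancer" } },
    { "op": "replace", "path": "/spec/sharding/configsvrReplSet/expose", "value": { "enabled": true, "exposeType" : "LoadBalancer" } }]'

After the patch the operator re-reconciles every component, which is why the same wait_for_running / wait_cluster_consistency sequence follows below.
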
wait_for_running some-name-rs0 3 + local name=some-name-rs0 + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=some-name ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + [[ 0 -eq 2 ]] + wait_pod some-name-rs0-0 + local pod=some-name-rs0-0 + set +o xtrace waiting for pod/some-name-rs0-0 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 1 -eq 2 ]] + wait_pod some-name-rs0-1 + local pod=some-name-rs0-1 + set +o xtrace waiting for pod/some-name-rs0-1 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.7Bd00Ix84X +++ mktemp ++ local LAST_ERR=/tmp/tmp.flESZo4pf9 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.7Bd00Ix84X ++ cat /tmp/tmp.flESZo4pf9 ++ rm /tmp/tmp.7Bd00Ix84X /tmp/tmp.flESZo4pf9 ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-rs0-2 + local pod=some-name-rs0-2 + set +o xtrace waiting for pod/some-name-rs0-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Ng1CRhSvPB +++ mktemp ++ local LAST_ERR=/tmp/tmp.RhL2mbwtec ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Ng1CRhSvPB ++ cat /tmp/tmp.RhL2mbwtec ++ rm /tmp/tmp.Ng1CRhSvPB /tmp/tmp.RhL2mbwtec ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness + wait_for_running some-name-cfg 3 false + local name=some-name-cfg + let last_pod=2 + local check_cluster_readyness=false + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=cfg + local cluster_name=some-name ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + [[ 0 -eq 2 ]] + wait_pod some-name-cfg-0 + local pod=some-name-cfg-0 + set +o xtrace waiting for pod/some-name-cfg-0 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 1 -eq 2 ]] + wait_pod some-name-cfg-1 + local pod=some-name-cfg-1 + set +o xtrace waiting for pod/some-name-cfg-1 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.voIw0aDxjw +++ mktemp ++ local LAST_ERR=/tmp/tmp.zLFcBHxPPW ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.voIw0aDxjw ++ cat /tmp/tmp.zLFcBHxPPW ++ rm /tmp/tmp.voIw0aDxjw /tmp/tmp.zLFcBHxPPW ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-cfg-2 + local pod=some-name-cfg-2 + set +o xtrace waiting for pod/some-name-cfg-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].non_voting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.bwumzeRmmi +++ mktemp ++ local LAST_ERR=/tmp/tmp.ZueGTj8txe ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ 
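
As the trace shows, wait_for_running does more than poll pod readiness: for each replset it also reads the CR to decide whether an arbiter or a non-voting member changes the expected pod layout, using jsonpath filters like these (copied from the trace):

  kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}'
  kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}'

Both queries come back empty in this run, so the plain three-pod check applies throughout.
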
set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].non_voting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.bwumzeRmmi ++ cat /tmp/tmp.ZueGTj8txe ++ rm /tmp/tmp.bwumzeRmmi /tmp/tmp.ZueGTj8txe ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ false == \t\r\u\e ]] + wait_for_running some-name-mongos 3 + local name=some-name-mongos + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=mongos + local cluster_name=some-name ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + [[ 0 -eq 2 ]] + wait_pod some-name-mongos-0 + local pod=some-name-mongos-0 + set +o xtrace waiting for pod/some-name-mongos-0 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 1 -eq 2 ]] + wait_pod some-name-mongos-1 + local pod=some-name-mongos-1 + set +o xtrace waiting for pod/some-name-mongos-1 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.2VBNcv7krU +++ mktemp ++ local LAST_ERR=/tmp/tmp.43KnRfsYs5 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.2VBNcv7krU ++ cat /tmp/tmp.43KnRfsYs5 ++ rm /tmp/tmp.2VBNcv7krU /tmp/tmp.43KnRfsYs5 ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-mongos-2 + local pod=some-name-mongos-2 + set +o xtrace waiting for pod/some-name-mongos-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].non_voting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.znv5BA07TC +++ mktemp ++ local LAST_ERR=/tmp/tmp.2wQBgZd5AN ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].non_voting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.znv5BA07TC ++ cat /tmp/tmp.2wQBgZd5AN ++ rm /tmp/tmp.znv5BA07TC /tmp/tmp.2wQBgZd5AN ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness + wait_cluster_consistency some-name + local cluster_name=some-name + local wait_time=32 + retry=0 + sleep 7 + echo -n 'waiting for cluster readyness' waiting for cluster readyness++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.AZIdTaFopZ +++ mktemp ++ local LAST_ERR=/tmp/tmp.LEzqcYj6J1 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.AZIdTaFopZ ++ cat /tmp/tmp.LEzqcYj6J1 ++ rm /tmp/tmp.AZIdTaFopZ /tmp/tmp.LEzqcYj6J1 ++ return 0 + [[ ready == \r\e\a\d\y ]] + echo + run_mongos 'use myApp\n db.test.insert({ x: 100502 })' myApp:myPass@some-name-mongos.expose-sharded-10972 + local 'command=use myApp\n db.test.insert({ x: 100502 })' + local uri=myApp:myPass@some-name-mongos.expose-sharded-10972 + local driver=mongodb + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.34QBcMVNEL +++ mktemp ++ local LAST_ERR=/tmp/tmp.p3UTGgnFxR ++ local 
exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.34QBcMVNEL ++ cat /tmp/tmp.p3UTGgnFxR ++ rm /tmp/tmp.34QBcMVNEL /tmp/tmp.p3UTGgnFxR ++ return 0 + local client_container=psmdb-client-6c585f8dbd-dg6pk + local mongo_flag= + kubectl_bin exec psmdb-client-6c585f8dbd-dg6pk -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100502 })\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.expose-sharded-10972.svc.cluster.local/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.KQIPKKwniw ++ mktemp + local LAST_ERR=/tmp/tmp.ALgfgOlbEk + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-6c585f8dbd-dg6pk -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100502 })\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.expose-sharded-10972.svc.cluster.local/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.KQIPKKwniw Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-mongos.expose-sharded-10972.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb Implicit session: session { "id" : UUID("eafcedcf-3c75-4714-8271-ed9f449caf60") } Percona Server for MongoDB server version: v7.0.12-7 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.ALgfgOlbEk + rm /tmp/tmp.KQIPKKwniw /tmp/tmp.ALgfgOlbEk + return 0 + compare_mongos_cmd find myApp:myPass@some-name-mongos.expose-sharded-10972 -3nd + local command=find + local uri=myApp:myPass@some-name-mongos.expose-sharded-10972 + local postfix=-3nd + local suffix= + local database=myApp + local collection=test + egrep -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + run_mongos 'use myApp\n db.test.find()' myApp:myPass@some-name-mongos.expose-sharded-10972 mongodb '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-mongos.expose-sharded-10972 + local driver=mongodb + local suffix=.svc.cluster.local + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.20mninVG52 +++ mktemp ++ local LAST_ERR=/tmp/tmp.GFO7Yaee7g ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.20mninVG52 ++ cat /tmp/tmp.GFO7Yaee7g ++ rm /tmp/tmp.20mninVG52 /tmp/tmp.GFO7Yaee7g ++ return 0 + local client_container=psmdb-client-6c585f8dbd-dg6pk + local mongo_flag= + kubectl_bin exec psmdb-client-6c585f8dbd-dg6pk -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.expose-sharded-10972.svc.cluster.local/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.dUdrCBdCwo ++ mktemp + local LAST_ERR=/tmp/tmp.kRloukBgFA + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-6c585f8dbd-dg6pk -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo 
mongodb://myApp:myPass@some-name-mongos.expose-sharded-10972.svc.cluster.local/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.dUdrCBdCwo + cat /tmp/tmp.kRloukBgFA + rm /tmp/tmp.dUdrCBdCwo /tmp/tmp.kRloukBgFA + return 0 + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1612/e2e-tests/expose-sharded/compare/find-3nd.json /tmp/tmp.azxQG9D5DO/find-3nd + compare_mongo_cmd find myApp:myPass@some-name-rs0-0.some-name-rs0.expose-sharded-10972 -3nd + local command=find + local uri=myApp:myPass@some-name-rs0-0.some-name-rs0.expose-sharded-10972 + local postfix=-3nd + local suffix= + local database=myApp + local collection=test + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-0.some-name-rs0.expose-sharded-10972 mongodb '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-0.some-name-rs0.expose-sharded-10972 + local driver=mongodb + local suffix=.svc.cluster.local + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.2kVFR2QVrY +++ mktemp ++ local LAST_ERR=/tmp/tmp.I2PaEAIANh ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.2kVFR2QVrY ++ cat /tmp/tmp.I2PaEAIANh ++ rm /tmp/tmp.2kVFR2QVrY /tmp/tmp.I2PaEAIANh ++ return 0 + local client_container=psmdb-client-6c585f8dbd-dg6pk + local mongo_flag= + [[ myApp:myPass@some-name-rs0-0.some-name-rs0.expose-sharded-10972 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-6c585f8dbd-dg6pk -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-0.some-name-rs0.expose-sharded-10972.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.x5VgOXWDQX ++ mktemp + local LAST_ERR=/tmp/tmp.LbZ5ZiCMfp + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-6c585f8dbd-dg6pk -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-0.some-name-rs0.expose-sharded-10972.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.x5VgOXWDQX + cat /tmp/tmp.LbZ5ZiCMfp + rm /tmp/tmp.x5VgOXWDQX /tmp/tmp.LbZ5ZiCMfp + return 0 + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1612/e2e-tests/expose-sharded/compare/find-3nd.json /tmp/tmp.azxQG9D5DO/find-3nd + compare_mongo_cmd find myApp:myPass@some-name-rs0-1.some-name-rs0.expose-sharded-10972 -3nd + local command=find + local uri=myApp:myPass@some-name-rs0-1.some-name-rs0.expose-sharded-10972 + local postfix=-3nd + local suffix= + local database=myApp + local collection=test + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-1.some-name-rs0.expose-sharded-10972 mongodb '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-1.some-name-rs0.expose-sharded-10972 + local driver=mongodb + local suffix=.svc.cluster.local + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history 
file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.G05wlDCEZO +++ mktemp ++ local LAST_ERR=/tmp/tmp.QEUHaK9vUB ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.G05wlDCEZO ++ cat /tmp/tmp.QEUHaK9vUB ++ rm /tmp/tmp.G05wlDCEZO /tmp/tmp.QEUHaK9vUB ++ return 0 + local client_container=psmdb-client-6c585f8dbd-dg6pk + local mongo_flag= + [[ myApp:myPass@some-name-rs0-1.some-name-rs0.expose-sharded-10972 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-6c585f8dbd-dg6pk -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-1.some-name-rs0.expose-sharded-10972.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.jdkJj2NGKv ++ mktemp + local LAST_ERR=/tmp/tmp.u9G8PGAbhN + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-6c585f8dbd-dg6pk -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-1.some-name-rs0.expose-sharded-10972.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.jdkJj2NGKv + cat /tmp/tmp.u9G8PGAbhN + rm /tmp/tmp.jdkJj2NGKv /tmp/tmp.u9G8PGAbhN + return 0 + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1612/e2e-tests/expose-sharded/compare/find-3nd.json /tmp/tmp.azxQG9D5DO/find-3nd + compare_mongo_cmd find myApp:myPass@some-name-rs0-2.some-name-rs0.expose-sharded-10972 -3nd + local command=find + local uri=myApp:myPass@some-name-rs0-2.some-name-rs0.expose-sharded-10972 + local postfix=-3nd + local suffix= + local database=myApp + local collection=test + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-2.some-name-rs0.expose-sharded-10972 mongodb '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-2.some-name-rs0.expose-sharded-10972 + local driver=mongodb + local suffix=.svc.cluster.local + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' +++ mktemp ++ local LAST_OUT=/tmp/tmp.330ajw9Z0Y +++ mktemp ++ local LAST_ERR=/tmp/tmp.oDu5sc3He4 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.330ajw9Z0Y ++ cat /tmp/tmp.oDu5sc3He4 ++ rm /tmp/tmp.330ajw9Z0Y /tmp/tmp.oDu5sc3He4 ++ return 0 + local client_container=psmdb-client-6c585f8dbd-dg6pk + local mongo_flag= + [[ myApp:myPass@some-name-rs0-2.some-name-rs0.expose-sharded-10972 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-6c585f8dbd-dg6pk -- bash -c 'printf '\''use myApp\n 
db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-2.some-name-rs0.expose-sharded-10972.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.7Z6IIP4Ht6 ++ mktemp + local LAST_ERR=/tmp/tmp.cDBEuGZK06 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-6c585f8dbd-dg6pk -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-2.some-name-rs0.expose-sharded-10972.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.7Z6IIP4Ht6 + cat /tmp/tmp.cDBEuGZK06 + rm /tmp/tmp.7Z6IIP4Ht6 /tmp/tmp.cDBEuGZK06 + return 0 + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1612/e2e-tests/expose-sharded/compare/find-3nd.json /tmp/tmp.azxQG9D5DO/find-3nd + sleep 60
+ desc 'Pause Exposed cluster (LoadBalancer)' + set +o xtrace ----------------------------------------------------------------------------------- Pause Exposed cluster (LoadBalancer) -----------------------------------------------------------------------------------
+ stop_cluster some-name + local cluster_name=some-name + local max_wait_time=120 + local passed_time=0 + local sleep_time=1 + kubectl_bin patch psmdb some-name --type json '-p=[{"op":"add","path":"/spec/pause","value":true}]' ++ mktemp + local LAST_OUT=/tmp/tmp.804XesbAfa ++ mktemp + local LAST_ERR=/tmp/tmp.yfusReM29Q + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl patch psmdb some-name --type json '-p=[{"op":"add","path":"/spec/pause","value":true}]' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.804XesbAfa perconaservermongodb.psmdb.percona.com/some-name patched + cat /tmp/tmp.yfusReM29Q + rm /tmp/tmp.804XesbAfa /tmp/tmp.yfusReM29Q + return 0 + set +x
Waiting for cluster stop...............Error from server (NotFound): deployments.apps "some-name-mongos" not found
[the NotFound message above repeated 31 more times, four per polling round, while the paused cluster shut down]
+ start_cluster some-name + local cluster_name=some-name + kubectl_bin patch psmdb some-name --type json '-p=[{"op":"add","path":"/spec/pause","value":false}]' ++ mktemp + local LAST_OUT=/tmp/tmp.gbbouBkcaA ++ mktemp + local LAST_ERR=/tmp/tmp.P9fOCkrfuZ + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl patch psmdb some-name --type json '-p=[{"op":"add","path":"/spec/pause","value":false}]' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.gbbouBkcaA perconaservermongodb.psmdb.percona.com/some-name patched + cat /tmp/tmp.P9fOCkrfuZ + rm /tmp/tmp.gbbouBkcaA /tmp/tmp.P9fOCkrfuZ + return 0
+ wait_cluster_consistency some-name + local cluster_name=some-name + local wait_time=32 + retry=0 + sleep 7 + echo -n 'waiting for cluster readyness' waiting for cluster readyness++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.dcXzKikB5n +++ mktemp ++ local LAST_ERR=/tmp/tmp.LW3dyMrEmL ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.dcXzKikB5n ++ cat /tmp/tmp.LW3dyMrEmL ++ rm /tmp/tmp.dcXzKikB5n /tmp/tmp.LW3dyMrEmL ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 1 -ge 32 ']' + echo -n .
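
Two things are visible in the pause phase above. First, pausing and resuming are each a one-field JSON patch on the CR. Second, the NotFound burst is expected rather than a failure: while waiting for the stop, the helper keeps polling the some-name-mongos deployment, which evidently no longer exists once the operator processes the pause, so every poll until full shutdown returns NotFound. A minimal equivalent of the stop/start cycle, with both patch commands verbatim from the trace:

  kubectl patch psmdb some-name --type json '-p=[{"op":"add","path":"/spec/pause","value":true}]'
  # ...wait for the cluster's pods and deployments to go away...
  kubectl patch psmdb some-name --type json '-p=[{"op":"add","path":"/spec/pause","value":false}]'
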
.+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ZR0T8cOICA +++ mktemp ++ local LAST_ERR=/tmp/tmp.TI3XfJ5pUV ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.ZR0T8cOICA ++ cat /tmp/tmp.TI3XfJ5pUV ++ rm /tmp/tmp.ZR0T8cOICA /tmp/tmp.TI3XfJ5pUV ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 2 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.RTacSAy6F4 +++ mktemp ++ local LAST_ERR=/tmp/tmp.xJBpstldOK ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.RTacSAy6F4 ++ cat /tmp/tmp.xJBpstldOK ++ rm /tmp/tmp.RTacSAy6F4 /tmp/tmp.xJBpstldOK ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 3 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ORjN9tZoqG +++ mktemp ++ local LAST_ERR=/tmp/tmp.8FUmDq8Sdm ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.ORjN9tZoqG ++ cat /tmp/tmp.8FUmDq8Sdm ++ rm /tmp/tmp.ORjN9tZoqG /tmp/tmp.8FUmDq8Sdm ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 4 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.73oXpPqUu4 +++ mktemp ++ local LAST_ERR=/tmp/tmp.vOAv0KdJhl ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.73oXpPqUu4 ++ cat /tmp/tmp.vOAv0KdJhl ++ rm /tmp/tmp.73oXpPqUu4 /tmp/tmp.vOAv0KdJhl ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 5 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.S8x5uVqHbM +++ mktemp ++ local LAST_ERR=/tmp/tmp.gkdSf2cTEV ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.S8x5uVqHbM ++ cat /tmp/tmp.gkdSf2cTEV ++ rm /tmp/tmp.S8x5uVqHbM /tmp/tmp.gkdSf2cTEV ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 6 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.XBZpvlwwfJ +++ mktemp ++ local LAST_ERR=/tmp/tmp.vRPmCn5Bno ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.XBZpvlwwfJ ++ cat /tmp/tmp.vRPmCn5Bno ++ rm /tmp/tmp.XBZpvlwwfJ /tmp/tmp.vRPmCn5Bno ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 7 -ge 32 ']' + echo -n . 
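
The readiness loop running here is bounded: wait_cluster_consistency sleeps 7s once, then re-checks .status.state every 10s and gives up when the retry counter reaches 32, i.e. a bit over five minutes of 'initializing' before failing the test. This run takes nine retries (roughly a minute and a half) to come back 'ready' after the resume. The traced logic reduces to something like the following (a condensed sketch, not the helper's literal source):

  retry=0
  until [ "$(kubectl get psmdb some-name -o 'jsonpath={.status.state}')" = "ready" ]; do
    retry=$((retry + 1))
    [ "$retry" -ge 32 ] && exit 1   # give up after ~32 polls
    sleep 10
  done
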
.+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.uMOLnxNB6Q +++ mktemp ++ local LAST_ERR=/tmp/tmp.OOrnvjZUVP ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.uMOLnxNB6Q ++ cat /tmp/tmp.OOrnvjZUVP ++ rm /tmp/tmp.uMOLnxNB6Q /tmp/tmp.OOrnvjZUVP ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 8 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.YpXiHGb1Uw +++ mktemp ++ local LAST_ERR=/tmp/tmp.fnAn3nNZxi ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.YpXiHGb1Uw ++ cat /tmp/tmp.fnAn3nNZxi ++ rm /tmp/tmp.YpXiHGb1Uw /tmp/tmp.fnAn3nNZxi ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 9 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ma3UXdAtIJ +++ mktemp ++ local LAST_ERR=/tmp/tmp.3h3IEkEfTD ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.ma3UXdAtIJ ++ cat /tmp/tmp.3h3IEkEfTD ++ rm /tmp/tmp.ma3UXdAtIJ /tmp/tmp.3h3IEkEfTD ++ return 0 + [[ ready == \r\e\a\d\y ]] + echo + run_mongos 'use myApp\n db.test.insert({ x: 100503 })' myApp:myPass@some-name-mongos.expose-sharded-10972 + local 'command=use myApp\n db.test.insert({ x: 100503 })' + local uri=myApp:myPass@some-name-mongos.expose-sharded-10972 + local driver=mongodb + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.8f3EBOzhfm +++ mktemp ++ local LAST_ERR=/tmp/tmp.0ZWTuL0wEz ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.8f3EBOzhfm ++ cat /tmp/tmp.0ZWTuL0wEz ++ rm /tmp/tmp.8f3EBOzhfm /tmp/tmp.0ZWTuL0wEz ++ return 0 + local client_container=psmdb-client-6c585f8dbd-dg6pk + local mongo_flag= + kubectl_bin exec psmdb-client-6c585f8dbd-dg6pk -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100503 })\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.expose-sharded-10972.svc.cluster.local/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.CaWuBaiyQ3 ++ mktemp + local LAST_ERR=/tmp/tmp.a6oIXj1eVD + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-6c585f8dbd-dg6pk -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100503 })\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.expose-sharded-10972.svc.cluster.local/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.CaWuBaiyQ3 Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-mongos.expose-sharded-10972.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb Implicit session: session { "id" : 
UUID("49bd6e32-0e74-495a-8447-d1d51e33d02d") } Percona Server for MongoDB server version: v7.0.12-7 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.a6oIXj1eVD + rm /tmp/tmp.CaWuBaiyQ3 /tmp/tmp.a6oIXj1eVD + return 0 + compare_mongos_cmd find myApp:myPass@some-name-mongos.expose-sharded-10972 -4nd + local command=find + local uri=myApp:myPass@some-name-mongos.expose-sharded-10972 + local postfix=-4nd + local suffix= + local database=myApp + local collection=test + run_mongos 'use myApp\n db.test.find()' myApp:myPass@some-name-mongos.expose-sharded-10972 mongodb '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-mongos.expose-sharded-10972 + local driver=mongodb + local suffix=.svc.cluster.local + egrep -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.f0YzOJ4NtT +++ mktemp ++ local LAST_ERR=/tmp/tmp.dYPD2JN2wN ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.f0YzOJ4NtT ++ cat /tmp/tmp.dYPD2JN2wN ++ rm /tmp/tmp.f0YzOJ4NtT /tmp/tmp.dYPD2JN2wN ++ return 0 + local client_container=psmdb-client-6c585f8dbd-dg6pk + local mongo_flag= + kubectl_bin exec psmdb-client-6c585f8dbd-dg6pk -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.expose-sharded-10972.svc.cluster.local/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.n1tNnaaOx5 ++ mktemp + local LAST_ERR=/tmp/tmp.gRL4Xp7YLo + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-6c585f8dbd-dg6pk -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.expose-sharded-10972.svc.cluster.local/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.n1tNnaaOx5 + cat /tmp/tmp.gRL4Xp7YLo + rm /tmp/tmp.n1tNnaaOx5 /tmp/tmp.gRL4Xp7YLo + return 0 + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1612/e2e-tests/expose-sharded/compare/find-4nd.json /tmp/tmp.azxQG9D5DO/find-4nd + compare_mongo_cmd find myApp:myPass@some-name-rs0-0.some-name-rs0.expose-sharded-10972 -4nd + local command=find + local uri=myApp:myPass@some-name-rs0-0.some-name-rs0.expose-sharded-10972 + local postfix=-4nd + local suffix= + local database=myApp + local collection=test + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-0.some-name-rs0.expose-sharded-10972 mongodb '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-0.some-name-rs0.expose-sharded-10972 + local driver=mongodb + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.bozqfx1d5q + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; 
s/-[0-9]+.svc/-xxx.svc/' +++ mktemp ++ local LAST_ERR=/tmp/tmp.7FlKtAWW8y ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.bozqfx1d5q ++ cat /tmp/tmp.7FlKtAWW8y ++ rm /tmp/tmp.bozqfx1d5q /tmp/tmp.7FlKtAWW8y ++ return 0 + local client_container=psmdb-client-6c585f8dbd-dg6pk + local mongo_flag= + [[ myApp:myPass@some-name-rs0-0.some-name-rs0.expose-sharded-10972 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-6c585f8dbd-dg6pk -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-0.some-name-rs0.expose-sharded-10972.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.NhRVoI87mx ++ mktemp + local LAST_ERR=/tmp/tmp.1kQtccVqCv + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-6c585f8dbd-dg6pk -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-0.some-name-rs0.expose-sharded-10972.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.NhRVoI87mx + cat /tmp/tmp.1kQtccVqCv + rm /tmp/tmp.NhRVoI87mx /tmp/tmp.1kQtccVqCv + return 0 + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1612/e2e-tests/expose-sharded/compare/find-4nd.json /tmp/tmp.azxQG9D5DO/find-4nd + compare_mongo_cmd find myApp:myPass@some-name-rs0-1.some-name-rs0.expose-sharded-10972 -4nd + local command=find + local uri=myApp:myPass@some-name-rs0-1.some-name-rs0.expose-sharded-10972 + local postfix=-4nd + local suffix= + local database=myApp + local collection=test + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-1.some-name-rs0.expose-sharded-10972 mongodb '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-1.some-name-rs0.expose-sharded-10972 + local driver=mongodb + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ local LAST_OUT=/tmp/tmp.xeMy9NI6U7 +++ mktemp ++ local LAST_ERR=/tmp/tmp.0foRGc6u1E ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.xeMy9NI6U7 ++ cat /tmp/tmp.0foRGc6u1E ++ rm /tmp/tmp.xeMy9NI6U7 /tmp/tmp.0foRGc6u1E ++ return 0 + local client_container=psmdb-client-6c585f8dbd-dg6pk + local mongo_flag= + [[ myApp:myPass@some-name-rs0-1.some-name-rs0.expose-sharded-10972 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-6c585f8dbd-dg6pk -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-1.some-name-rs0.expose-sharded-10972.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.8ZTj2glRtw ++ mktemp + local LAST_ERR=/tmp/tmp.rsLFXaEYwy + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl 
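The per-member checks traced here differ from the mongos check only in the connection string: the member pod's DNS name (pod.service.namespace) becomes the seed host, ssl=false turns TLS off, and replicaSet=rs0 tells the shell to run replica-set discovery from that seed. The log escapes the ampersand as \&; quoting the whole URI, as below, works the same way:

    # The same find, seeded from a single rs0 member instead of mongos.
    kubectl exec "$CLIENT" -- bash -c \
        "printf 'use myApp\n db.test.find()\n' | \
         mongo 'mongodb://myApp:myPass@some-name-rs0-0.some-name-rs0.$NS.svc.cluster.local/admin?ssl=false&replicaSet=rs0'"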
exec psmdb-client-6c585f8dbd-dg6pk -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-1.some-name-rs0.expose-sharded-10972.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.8ZTj2glRtw + cat /tmp/tmp.rsLFXaEYwy + rm /tmp/tmp.8ZTj2glRtw /tmp/tmp.rsLFXaEYwy + return 0 + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1612/e2e-tests/expose-sharded/compare/find-4nd.json /tmp/tmp.azxQG9D5DO/find-4nd + compare_mongo_cmd find myApp:myPass@some-name-rs0-2.some-name-rs0.expose-sharded-10972 -4nd + local command=find + local uri=myApp:myPass@some-name-rs0-2.some-name-rs0.expose-sharded-10972 + local postfix=-4nd + local suffix= + local database=myApp + local collection=test + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-2.some-name-rs0.expose-sharded-10972 mongodb '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-2.some-name-rs0.expose-sharded-10972 + local driver=mongodb + local suffix=.svc.cluster.local + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Oru9lfnccj +++ mktemp ++ local LAST_ERR=/tmp/tmp.d1tRuz81rW ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Oru9lfnccj ++ cat /tmp/tmp.d1tRuz81rW ++ rm /tmp/tmp.Oru9lfnccj /tmp/tmp.d1tRuz81rW ++ return 0 + local client_container=psmdb-client-6c585f8dbd-dg6pk + local mongo_flag= + [[ myApp:myPass@some-name-rs0-2.some-name-rs0.expose-sharded-10972 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-6c585f8dbd-dg6pk -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-2.some-name-rs0.expose-sharded-10972.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.PHYQjfGbCm ++ mktemp + local LAST_ERR=/tmp/tmp.ICx4RKsrDS + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-6c585f8dbd-dg6pk -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-2.some-name-rs0.expose-sharded-10972.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.PHYQjfGbCm + cat /tmp/tmp.ICx4RKsrDS + rm /tmp/tmp.PHYQjfGbCm /tmp/tmp.ICx4RKsrDS + return 0 + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1612/e2e-tests/expose-sharded/compare/find-4nd.json /tmp/tmp.azxQG9D5DO/find-4nd + desc 'Exposed, LoadBalancer -> ClusterIP' + set +o xtrace ----------------------------------------------------------------------------------- Exposed, LoadBalancer -> ClusterIP ----------------------------------------------------------------------------------- + expose_cluster ClusterIP + expose_type=ClusterIP + expose_status=true + kubectl_bin patch psmdb some-name --type=json --patch '[ { "op": "replace", "path": "/spec/replsets/0/expose", "value": { "enabled": true, "exposeType" : 
"ClusterIP" } }, { "op": "replace", "path": "/spec/sharding/mongos/expose", "value": { "exposeType" : "ClusterIP" } }, { "op": "replace", "path": "/spec/sharding/configsvrReplSet/expose", "value": { "enabled": true, "exposeType" : "ClusterIP" } }]' ++ mktemp + local LAST_OUT=/tmp/tmp.6J8LYV0EiL ++ mktemp + local LAST_ERR=/tmp/tmp.q2hhaGYhAy + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl patch psmdb some-name --type=json --patch '[ { "op": "replace", "path": "/spec/replsets/0/expose", "value": { "enabled": true, "exposeType" : "ClusterIP" } }, { "op": "replace", "path": "/spec/sharding/mongos/expose", "value": { "exposeType" : "ClusterIP" } }, { "op": "replace", "path": "/spec/sharding/configsvrReplSet/expose", "value": { "enabled": true, "exposeType" : "ClusterIP" } }]' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.6J8LYV0EiL perconaservermongodb.psmdb.percona.com/some-name patched + cat /tmp/tmp.q2hhaGYhAy + rm /tmp/tmp.6J8LYV0EiL /tmp/tmp.q2hhaGYhAy + return 0 + wait_for_running some-name-rs0 3 + local name=some-name-rs0 + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=some-name ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + [[ 0 -eq 2 ]] + wait_pod some-name-rs0-0 + local pod=some-name-rs0-0 + set +o xtrace waiting for pod/some-name-rs0-0 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 1 -eq 2 ]] + wait_pod some-name-rs0-1 + local pod=some-name-rs0-1 + set +o xtrace waiting for pod/some-name-rs0-1 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Zyqq0X8a4A +++ mktemp ++ local LAST_ERR=/tmp/tmp.JWH2gbhDDN ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Zyqq0X8a4A ++ cat /tmp/tmp.JWH2gbhDDN ++ rm /tmp/tmp.Zyqq0X8a4A /tmp/tmp.JWH2gbhDDN ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-rs0-2 + local pod=some-name-rs0-2 + set +o xtrace waiting for pod/some-name-rs0-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.MDrywGdEZr +++ mktemp ++ local LAST_ERR=/tmp/tmp.ZV7nmfUHUX ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.MDrywGdEZr ++ cat /tmp/tmp.ZV7nmfUHUX ++ rm /tmp/tmp.MDrywGdEZr /tmp/tmp.ZV7nmfUHUX ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness + wait_for_running some-name-cfg 3 false + local name=some-name-cfg + let last_pod=2 + local check_cluster_readyness=false + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=cfg + local cluster_name=some-name ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + [[ 0 -eq 2 ]] + wait_pod some-name-cfg-0 + local pod=some-name-cfg-0 + set +o xtrace waiting for pod/some-name-cfg-0 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 1 -eq 2 ]] + wait_pod some-name-cfg-1 + local pod=some-name-cfg-1 + set +o xtrace waiting for 
pod/some-name-cfg-1 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Z85RiNisQF +++ mktemp ++ local LAST_ERR=/tmp/tmp.Ssmz2h5ZfU ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Z85RiNisQF ++ cat /tmp/tmp.Ssmz2h5ZfU ++ rm /tmp/tmp.Z85RiNisQF /tmp/tmp.Ssmz2h5ZfU ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-cfg-2 + local pod=some-name-cfg-2 + set +o xtrace waiting for pod/some-name-cfg-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].non_voting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.vZu7gJuEen +++ mktemp ++ local LAST_ERR=/tmp/tmp.A76KsrIvMR ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].non_voting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.vZu7gJuEen ++ cat /tmp/tmp.A76KsrIvMR ++ rm /tmp/tmp.vZu7gJuEen /tmp/tmp.A76KsrIvMR ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ false == \t\r\u\e ]] + wait_for_running some-name-mongos 3 + local name=some-name-mongos + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=mongos + local cluster_name=some-name ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + [[ 0 -eq 2 ]] + wait_pod some-name-mongos-0 + local pod=some-name-mongos-0 + set +o xtrace waiting for pod/some-name-mongos-0 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 1 -eq 2 ]] + wait_pod some-name-mongos-1 + local pod=some-name-mongos-1 + set +o xtrace waiting for pod/some-name-mongos-1 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.yvHSwHqR3z +++ mktemp ++ local LAST_ERR=/tmp/tmp.KL3lyY1Lan ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.yvHSwHqR3z ++ cat /tmp/tmp.KL3lyY1Lan ++ rm /tmp/tmp.yvHSwHqR3z /tmp/tmp.KL3lyY1Lan ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-mongos-2 + local pod=some-name-mongos-2 + set +o xtrace waiting for pod/some-name-mongos-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].non_voting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.zseHfpgHlk +++ mktemp ++ local LAST_ERR=/tmp/tmp.nZGGNIgAdu ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].non_voting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.zseHfpgHlk ++ cat /tmp/tmp.nZGGNIgAdu ++ rm /tmp/tmp.zseHfpgHlk /tmp/tmp.nZGGNIgAdu ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness + wait_cluster_consistency some-name + local cluster_name=some-name + local wait_time=32 + retry=0 + sleep 7 + echo -n 'waiting for 
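wait_for_running walks pods 0 through 2 of the component's statefulset, waits for each, and before the last pod consults the CR to decide whether that final slot is an arbiter or a non-voting member (both probes return empty here, so pod 2 is waited on like the rest). Its two building blocks, expressed in plain kubectl (the harness's wait_pod polls readiness itself; kubectl wait is a stand-in I am substituting for brevity):

    # What wait_pod accomplishes: block until the pod reports Ready.
    kubectl wait --for=condition=ready pod/some-name-cfg-0 --timeout=300s

    # Filtered JSONPath probe: is an arbiter configured for this replset?
    # Empty output means "not set".
    kubectl get psmdb some-name \
        -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].arbiter.enabled}'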
cluster readyness' waiting for cluster readyness++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.V8doaNsp0a +++ mktemp ++ local LAST_ERR=/tmp/tmp.08UnvAgVdk ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.V8doaNsp0a ++ cat /tmp/tmp.08UnvAgVdk ++ rm /tmp/tmp.V8doaNsp0a /tmp/tmp.08UnvAgVdk ++ return 0 + [[ ready == \r\e\a\d\y ]] + echo + run_mongos 'use myApp\n db.test.insert({ x: 100504 })' myApp:myPass@some-name-mongos.expose-sharded-10972 + local 'command=use myApp\n db.test.insert({ x: 100504 })' + local uri=myApp:myPass@some-name-mongos.expose-sharded-10972 + local driver=mongodb + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.KzyB43V90C +++ mktemp ++ local LAST_ERR=/tmp/tmp.yRpNxhxwvG ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.KzyB43V90C ++ cat /tmp/tmp.yRpNxhxwvG ++ rm /tmp/tmp.KzyB43V90C /tmp/tmp.yRpNxhxwvG ++ return 0 + local client_container=psmdb-client-6c585f8dbd-dg6pk + local mongo_flag= + kubectl_bin exec psmdb-client-6c585f8dbd-dg6pk -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100504 })\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.expose-sharded-10972.svc.cluster.local/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.Jkp4R0F4dl ++ mktemp + local LAST_ERR=/tmp/tmp.DsJiLctPhi + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-6c585f8dbd-dg6pk -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100504 })\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.expose-sharded-10972.svc.cluster.local/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Jkp4R0F4dl Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-mongos.expose-sharded-10972.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb Implicit session: session { "id" : UUID("3fe4c1e6-6b27-4b83-9936-549f60b93abb") } Percona Server for MongoDB server version: v7.0.12-7 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.DsJiLctPhi + rm /tmp/tmp.Jkp4R0F4dl /tmp/tmp.DsJiLctPhi + return 0 + compare_mongos_cmd find myApp:myPass@some-name-mongos.expose-sharded-10972 -5nd + local command=find + local uri=myApp:myPass@some-name-mongos.expose-sharded-10972 + local postfix=-5nd + local suffix= + local database=myApp + local collection=test + run_mongos 'use myApp\n db.test.find()' myApp:myPass@some-name-mongos.expose-sharded-10972 mongodb '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-mongos.expose-sharded-10972 + local driver=mongodb + local suffix=.svc.cluster.local + egrep -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 
'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.PXeOfk4bc4 +++ mktemp ++ local LAST_ERR=/tmp/tmp.cKgEIlQBb2 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.PXeOfk4bc4 ++ cat /tmp/tmp.cKgEIlQBb2 ++ rm /tmp/tmp.PXeOfk4bc4 /tmp/tmp.cKgEIlQBb2 ++ return 0 + local client_container=psmdb-client-6c585f8dbd-dg6pk + local mongo_flag= + kubectl_bin exec psmdb-client-6c585f8dbd-dg6pk -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.expose-sharded-10972.svc.cluster.local/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.3jcsBBhX4p ++ mktemp + local LAST_ERR=/tmp/tmp.ay687o0zUx + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-6c585f8dbd-dg6pk -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.expose-sharded-10972.svc.cluster.local/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.3jcsBBhX4p + cat /tmp/tmp.ay687o0zUx + rm /tmp/tmp.3jcsBBhX4p /tmp/tmp.ay687o0zUx + return 0 + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1612/e2e-tests/expose-sharded/compare/find-5nd.json /tmp/tmp.azxQG9D5DO/find-5nd + compare_mongo_cmd find myApp:myPass@some-name-rs0-0.some-name-rs0.expose-sharded-10972 -5nd + local command=find + local uri=myApp:myPass@some-name-rs0-0.some-name-rs0.expose-sharded-10972 + local postfix=-5nd + local suffix= + local database=myApp + local collection=test + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-0.some-name-rs0.expose-sharded-10972 mongodb '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-0.some-name-rs0.expose-sharded-10972 + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + local driver=mongodb + local suffix=.svc.cluster.local + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.kd3lDnNKVj +++ mktemp ++ local LAST_ERR=/tmp/tmp.f41bNIXHse ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.kd3lDnNKVj ++ cat /tmp/tmp.f41bNIXHse ++ rm /tmp/tmp.kd3lDnNKVj /tmp/tmp.f41bNIXHse ++ return 0 + local client_container=psmdb-client-6c585f8dbd-dg6pk + local mongo_flag= + [[ myApp:myPass@some-name-rs0-0.some-name-rs0.expose-sharded-10972 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-6c585f8dbd-dg6pk -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-0.some-name-rs0.expose-sharded-10972.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.X9lG89M6kz ++ mktemp + local LAST_ERR=/tmp/tmp.NuxoTHX2P7 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-6c585f8dbd-dg6pk -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo 
mongodb://myApp:myPass@some-name-rs0-0.some-name-rs0.expose-sharded-10972.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.X9lG89M6kz + cat /tmp/tmp.NuxoTHX2P7 + rm /tmp/tmp.X9lG89M6kz /tmp/tmp.NuxoTHX2P7 + return 0 + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1612/e2e-tests/expose-sharded/compare/find-5nd.json /tmp/tmp.azxQG9D5DO/find-5nd + compare_mongo_cmd find myApp:myPass@some-name-rs0-1.some-name-rs0.expose-sharded-10972 -5nd + local command=find + local uri=myApp:myPass@some-name-rs0-1.some-name-rs0.expose-sharded-10972 + local postfix=-5nd + local suffix= + local database=myApp + local collection=test + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-1.some-name-rs0.expose-sharded-10972 mongodb '' + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-1.some-name-rs0.expose-sharded-10972 + local driver=mongodb + local suffix=.svc.cluster.local + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.3rU80uE4Mc +++ mktemp ++ local LAST_ERR=/tmp/tmp.LkyajtXp7E ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.3rU80uE4Mc ++ cat /tmp/tmp.LkyajtXp7E ++ rm /tmp/tmp.3rU80uE4Mc /tmp/tmp.LkyajtXp7E ++ return 0 + local client_container=psmdb-client-6c585f8dbd-dg6pk + local mongo_flag= + [[ myApp:myPass@some-name-rs0-1.some-name-rs0.expose-sharded-10972 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-6c585f8dbd-dg6pk -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-1.some-name-rs0.expose-sharded-10972.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.S2vgwcxPJX ++ mktemp + local LAST_ERR=/tmp/tmp.GAqiOm6JTy + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-6c585f8dbd-dg6pk -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-1.some-name-rs0.expose-sharded-10972.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.S2vgwcxPJX + cat /tmp/tmp.GAqiOm6JTy + rm /tmp/tmp.S2vgwcxPJX /tmp/tmp.GAqiOm6JTy + return 0 + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1612/e2e-tests/expose-sharded/compare/find-5nd.json /tmp/tmp.azxQG9D5DO/find-5nd + compare_mongo_cmd find myApp:myPass@some-name-rs0-2.some-name-rs0.expose-sharded-10972 -5nd + local command=find + local uri=myApp:myPass@some-name-rs0-2.some-name-rs0.expose-sharded-10972 + local postfix=-5nd + local suffix= + local database=myApp + local collection=test + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' + run_mongo 'use myApp\n db.test.find()' 
myApp:myPass@some-name-rs0-2.some-name-rs0.expose-sharded-10972 mongodb '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-2.some-name-rs0.expose-sharded-10972 + local driver=mongodb + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.LK5X8yjGvi +++ mktemp ++ local LAST_ERR=/tmp/tmp.C8UaRKrufK ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.LK5X8yjGvi ++ cat /tmp/tmp.C8UaRKrufK ++ rm /tmp/tmp.LK5X8yjGvi /tmp/tmp.C8UaRKrufK ++ return 0 + local client_container=psmdb-client-6c585f8dbd-dg6pk + local mongo_flag= + [[ myApp:myPass@some-name-rs0-2.some-name-rs0.expose-sharded-10972 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-6c585f8dbd-dg6pk -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-2.some-name-rs0.expose-sharded-10972.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.vqWUzAtv5u ++ mktemp + local LAST_ERR=/tmp/tmp.hcwJC0mGrZ + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-6c585f8dbd-dg6pk -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-2.some-name-rs0.expose-sharded-10972.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.vqWUzAtv5u + cat /tmp/tmp.hcwJC0mGrZ + rm /tmp/tmp.vqWUzAtv5u /tmp/tmp.hcwJC0mGrZ + return 0 + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1612/e2e-tests/expose-sharded/compare/find-5nd.json /tmp/tmp.azxQG9D5DO/find-5nd + desc 'Exposed -> Unexposed' + set +o xtrace ----------------------------------------------------------------------------------- Exposed -> Unexposed ----------------------------------------------------------------------------------- + expose_cluster ClusterIP false + expose_type=ClusterIP + expose_status=false + kubectl_bin patch psmdb some-name --type=json --patch '[ { "op": "replace", "path": "/spec/replsets/0/expose", "value": { "enabled": false, "exposeType" : "ClusterIP" } }, { "op": "replace", "path": "/spec/sharding/mongos/expose", "value": { "exposeType" : "ClusterIP" } }, { "op": "replace", "path": "/spec/sharding/configsvrReplSet/expose", "value": { "enabled": false, "exposeType" : "ClusterIP" } }]' ++ mktemp + local LAST_OUT=/tmp/tmp.czUpcG1xai ++ mktemp + local LAST_ERR=/tmp/tmp.CmkupHO8Mb + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl patch psmdb some-name --type=json --patch '[ { "op": "replace", "path": "/spec/replsets/0/expose", "value": { "enabled": false, "exposeType" : "ClusterIP" } }, { "op": "replace", "path": "/spec/sharding/mongos/expose", "value": { "exposeType" : "ClusterIP" } }, { "op": "replace", "path": "/spec/sharding/configsvrReplSet/expose", "value": { "enabled": false, "exposeType" : "ClusterIP" } }]' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.czUpcG1xai perconaservermongodb.psmdb.percona.com/some-name patched + cat /tmp/tmp.CmkupHO8Mb + rm /tmp/tmp.czUpcG1xai /tmp/tmp.CmkupHO8Mb + return 0 + wait_for_running some-name-rs0 3 + local name=some-name-rs0 + let last_pod=2 + local 
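The Exposed -> Unexposed step reuses the expose_cluster template sketched earlier, now with enabled set to false for the replset and config-server sections, so the operator tears down the per-pod Services while the cluster keeps serving through mongos:

    # Turn per-pod exposure back off; mongos keeps its ClusterIP Service.
    expose_cluster ClusterIP false

The documents written while exposed (x: 100503 and 100504) must remain readable afterwards, which is what the -6nd golden-file comparisons below verify alongside the fresh x: 100505 insert.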
check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=some-name ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + [[ 0 -eq 2 ]] + wait_pod some-name-rs0-0 + local pod=some-name-rs0-0 + set +o xtrace waiting for pod/some-name-rs0-0 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 1 -eq 2 ]] + wait_pod some-name-rs0-1 + local pod=some-name-rs0-1 + set +o xtrace waiting for pod/some-name-rs0-1 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Yu4ubyUzWa +++ mktemp ++ local LAST_ERR=/tmp/tmp.kZhDdWgPwl ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Yu4ubyUzWa ++ cat /tmp/tmp.kZhDdWgPwl ++ rm /tmp/tmp.Yu4ubyUzWa /tmp/tmp.kZhDdWgPwl ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-rs0-2 + local pod=some-name-rs0-2 + set +o xtrace waiting for pod/some-name-rs0-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.0oGnIuRaXf +++ mktemp ++ local LAST_ERR=/tmp/tmp.3QzqgaEmKG ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.0oGnIuRaXf ++ cat /tmp/tmp.3QzqgaEmKG ++ rm /tmp/tmp.0oGnIuRaXf /tmp/tmp.3QzqgaEmKG ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness + wait_for_running some-name-cfg 3 false + local name=some-name-cfg + let last_pod=2 + local check_cluster_readyness=false + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=cfg + local cluster_name=some-name ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + [[ 0 -eq 2 ]] + wait_pod some-name-cfg-0 + local pod=some-name-cfg-0 + set +o xtrace waiting for pod/some-name-cfg-0 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 1 -eq 2 ]] + wait_pod some-name-cfg-1 + local pod=some-name-cfg-1 + set +o xtrace waiting for pod/some-name-cfg-1 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.MWD6n2KIUE +++ mktemp ++ local LAST_ERR=/tmp/tmp.CxJiK0e5G6 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.MWD6n2KIUE ++ cat /tmp/tmp.CxJiK0e5G6 ++ rm /tmp/tmp.MWD6n2KIUE /tmp/tmp.CxJiK0e5G6 ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-cfg-2 + local pod=some-name-cfg-2 + set +o xtrace waiting for pod/some-name-cfg-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].non_voting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.hYCiN4Tu69 +++ mktemp ++ local LAST_ERR=/tmp/tmp.ieJcYHQFHu ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 
'jsonpath={.spec.replsets[?(@.name=="cfg")].non_voting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.hYCiN4Tu69 ++ cat /tmp/tmp.ieJcYHQFHu ++ rm /tmp/tmp.hYCiN4Tu69 /tmp/tmp.ieJcYHQFHu ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ false == \t\r\u\e ]] + wait_for_running some-name-mongos 3 + local name=some-name-mongos + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=mongos + local cluster_name=some-name ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + [[ 0 -eq 2 ]] + wait_pod some-name-mongos-0 + local pod=some-name-mongos-0 + set +o xtrace waiting for pod/some-name-mongos-0 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 1 -eq 2 ]] + wait_pod some-name-mongos-1 + local pod=some-name-mongos-1 + set +o xtrace waiting for pod/some-name-mongos-1 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.NJG1LLNY58 +++ mktemp ++ local LAST_ERR=/tmp/tmp.bqueluGKii ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.NJG1LLNY58 ++ cat /tmp/tmp.bqueluGKii ++ rm /tmp/tmp.NJG1LLNY58 /tmp/tmp.bqueluGKii ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-mongos-2 + local pod=some-name-mongos-2 + set +o xtrace waiting for pod/some-name-mongos-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].non_voting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.5EsGtNdBz6 +++ mktemp ++ local LAST_ERR=/tmp/tmp.Vk8NhYWaVV ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].non_voting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.5EsGtNdBz6 ++ cat /tmp/tmp.Vk8NhYWaVV ++ rm /tmp/tmp.5EsGtNdBz6 /tmp/tmp.Vk8NhYWaVV ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness + wait_cluster_consistency some-name + local cluster_name=some-name + local wait_time=32 + retry=0 + sleep 7 + echo -n 'waiting for cluster readyness' waiting for cluster readyness++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.48aHR2GHHB +++ mktemp ++ local LAST_ERR=/tmp/tmp.lyoK9baHX6 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.48aHR2GHHB ++ cat /tmp/tmp.lyoK9baHX6 ++ rm /tmp/tmp.48aHR2GHHB /tmp/tmp.lyoK9baHX6 ++ return 0 + [[ ready == \r\e\a\d\y ]] + echo + run_mongos 'use myApp\n db.test.insert({ x: 100505 })' myApp:myPass@some-name-mongos.expose-sharded-10972 + local 'command=use myApp\n db.test.insert({ x: 100505 })' + local uri=myApp:myPass@some-name-mongos.expose-sharded-10972 + local driver=mongodb + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.0kI9oE9zkG +++ mktemp ++ local LAST_ERR=/tmp/tmp.yQu1XllhEc ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ 
for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.0kI9oE9zkG ++ cat /tmp/tmp.yQu1XllhEc ++ rm /tmp/tmp.0kI9oE9zkG /tmp/tmp.yQu1XllhEc ++ return 0 + local client_container=psmdb-client-6c585f8dbd-dg6pk + local mongo_flag= + kubectl_bin exec psmdb-client-6c585f8dbd-dg6pk -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100505 })\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.expose-sharded-10972.svc.cluster.local/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.hnC5aNijL3 ++ mktemp + local LAST_ERR=/tmp/tmp.DkbgJunGko + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-6c585f8dbd-dg6pk -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100505 })\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.expose-sharded-10972.svc.cluster.local/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.hnC5aNijL3 Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-mongos.expose-sharded-10972.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb Implicit session: session { "id" : UUID("d87b192e-0c9c-4982-8758-c54cf9982ed7") } Percona Server for MongoDB server version: v7.0.12-7 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.DkbgJunGko + rm /tmp/tmp.hnC5aNijL3 /tmp/tmp.DkbgJunGko + return 0 + compare_mongos_cmd find myApp:myPass@some-name-mongos.expose-sharded-10972 -6nd + local command=find + local uri=myApp:myPass@some-name-mongos.expose-sharded-10972 + local postfix=-6nd + local suffix= + local database=myApp + local collection=test + run_mongos 'use myApp\n db.test.find()' myApp:myPass@some-name-mongos.expose-sharded-10972 mongodb '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-mongos.expose-sharded-10972 + local driver=mongodb + egrep -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.nH6PZjEYFC + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' +++ mktemp ++ local LAST_ERR=/tmp/tmp.jQUVjDujjG ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.nH6PZjEYFC ++ cat /tmp/tmp.jQUVjDujjG ++ rm /tmp/tmp.nH6PZjEYFC /tmp/tmp.jQUVjDujjG ++ return 0 + local client_container=psmdb-client-6c585f8dbd-dg6pk + local mongo_flag= + kubectl_bin exec psmdb-client-6c585f8dbd-dg6pk -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.expose-sharded-10972.svc.cluster.local/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.NOeqwr4vTH ++ mktemp + local LAST_ERR=/tmp/tmp.gppS8suFyc + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-6c585f8dbd-dg6pk -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo 
mongodb://myApp:myPass@some-name-mongos.expose-sharded-10972.svc.cluster.local/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.NOeqwr4vTH + cat /tmp/tmp.gppS8suFyc + rm /tmp/tmp.NOeqwr4vTH /tmp/tmp.gppS8suFyc + return 0 + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1612/e2e-tests/expose-sharded/compare/find-6nd.json /tmp/tmp.azxQG9D5DO/find-6nd + compare_mongo_cmd find myApp:myPass@some-name-rs0-0.some-name-rs0.expose-sharded-10972 -6nd + local command=find + local uri=myApp:myPass@some-name-rs0-0.some-name-rs0.expose-sharded-10972 + local postfix=-6nd + local suffix= + local database=myApp + local collection=test + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-0.some-name-rs0.expose-sharded-10972 mongodb '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-0.some-name-rs0.expose-sharded-10972 + local driver=mongodb + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.l32T3veHho +++ mktemp ++ local LAST_ERR=/tmp/tmp.tbXgSIFMx3 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.l32T3veHho ++ cat /tmp/tmp.tbXgSIFMx3 ++ rm /tmp/tmp.l32T3veHho /tmp/tmp.tbXgSIFMx3 ++ return 0 + local client_container=psmdb-client-6c585f8dbd-dg6pk + local mongo_flag= + [[ myApp:myPass@some-name-rs0-0.some-name-rs0.expose-sharded-10972 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-6c585f8dbd-dg6pk -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-0.some-name-rs0.expose-sharded-10972.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.98ZyE7rvRE ++ mktemp + local LAST_ERR=/tmp/tmp.zAG5SV3X3h + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-6c585f8dbd-dg6pk -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-0.some-name-rs0.expose-sharded-10972.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.98ZyE7rvRE + cat /tmp/tmp.zAG5SV3X3h + rm /tmp/tmp.98ZyE7rvRE /tmp/tmp.zAG5SV3X3h + return 0 + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1612/e2e-tests/expose-sharded/compare/find-6nd.json /tmp/tmp.azxQG9D5DO/find-6nd + compare_mongo_cmd find myApp:myPass@some-name-rs0-1.some-name-rs0.expose-sharded-10972 -6nd + local command=find + local uri=myApp:myPass@some-name-rs0-1.some-name-rs0.expose-sharded-10972 + local postfix=-6nd + local suffix= + local database=myApp + local collection=test + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' + run_mongo 'use myApp\n db.test.find()' 
myApp:myPass@some-name-rs0-1.some-name-rs0.expose-sharded-10972 mongodb '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-1.some-name-rs0.expose-sharded-10972 + local driver=mongodb + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.i4cvMGV2Y7 +++ mktemp ++ local LAST_ERR=/tmp/tmp.VCWVg8vXoj ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.i4cvMGV2Y7 ++ cat /tmp/tmp.VCWVg8vXoj ++ rm /tmp/tmp.i4cvMGV2Y7 /tmp/tmp.VCWVg8vXoj ++ return 0 + local client_container=psmdb-client-6c585f8dbd-dg6pk + local mongo_flag= + [[ myApp:myPass@some-name-rs0-1.some-name-rs0.expose-sharded-10972 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-6c585f8dbd-dg6pk -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-1.some-name-rs0.expose-sharded-10972.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.ZVUYtHcM5z ++ mktemp + local LAST_ERR=/tmp/tmp.43ju5ZsovU + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-6c585f8dbd-dg6pk -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-1.some-name-rs0.expose-sharded-10972.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.ZVUYtHcM5z + cat /tmp/tmp.43ju5ZsovU + rm /tmp/tmp.ZVUYtHcM5z /tmp/tmp.43ju5ZsovU + return 0 + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1612/e2e-tests/expose-sharded/compare/find-6nd.json /tmp/tmp.azxQG9D5DO/find-6nd + compare_mongo_cmd find myApp:myPass@some-name-rs0-2.some-name-rs0.expose-sharded-10972 -6nd + local command=find + local uri=myApp:myPass@some-name-rs0-2.some-name-rs0.expose-sharded-10972 + local postfix=-6nd + local suffix= + local database=myApp + local collection=test + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-2.some-name-rs0.expose-sharded-10972 mongodb '' + local 'command=use myApp\n db.test.find()' + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + local uri=myApp:myPass@some-name-rs0-2.some-name-rs0.expose-sharded-10972 + local driver=mongodb + local suffix=.svc.cluster.local + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.e2xPvtyzCV +++ mktemp ++ local LAST_ERR=/tmp/tmp.zP2jhWuFXl ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.e2xPvtyzCV ++ cat /tmp/tmp.zP2jhWuFXl ++ rm /tmp/tmp.e2xPvtyzCV /tmp/tmp.zP2jhWuFXl ++ return 0 + local client_container=psmdb-client-6c585f8dbd-dg6pk + local mongo_flag= + [[ myApp:myPass@some-name-rs0-2.some-name-rs0.expose-sharded-10972 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-6c585f8dbd-dg6pk -- bash -c 
'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-2.some-name-rs0.expose-sharded-10972.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.kt9IvCh7df ++ mktemp + local LAST_ERR=/tmp/tmp.LlMuECH844 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-6c585f8dbd-dg6pk -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-2.some-name-rs0.expose-sharded-10972.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.kt9IvCh7df + cat /tmp/tmp.LlMuECH844 + rm /tmp/tmp.kt9IvCh7df /tmp/tmp.LlMuECH844 + return 0 + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1612/e2e-tests/expose-sharded/compare/find-6nd.json /tmp/tmp.azxQG9D5DO/find-6nd + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1612/e2e-tests/conf/container-rc.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.ZPepVtMDZf ++ mktemp + local LAST_ERR=/tmp/tmp.EwgbKNJms7 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1612/e2e-tests/conf/container-rc.yaml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.ZPepVtMDZf runtimeclass.node.k8s.io "container-rc" deleted + cat /tmp/tmp.EwgbKNJms7 + rm /tmp/tmp.ZPepVtMDZf /tmp/tmp.EwgbKNJms7 + return 0 + destroy expose-sharded-10972 + local namespace=expose-sharded-10972 + local ignore_logs=true + desc 'destroy cluster/operator and all other resources' + set +o xtrace ----------------------------------------------------------------------------------- destroy cluster/operator and all other resources ----------------------------------------------------------------------------------- + '[' true == false ']' + delete_crd + desc 'get and delete old CRDs and RBAC' + set +o xtrace ----------------------------------------------------------------------------------- get and delete old CRDs and RBAC ----------------------------------------------------------------------------------- + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1612/deploy/crd.yaml --ignore-not-found --wait=false ++ mktemp + local LAST_OUT=/tmp/tmp.jS9mXYXAeT ++ mktemp + local LAST_ERR=/tmp/tmp.XSUW9lHh1m + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1612/deploy/crd.yaml --ignore-not-found --wait=false + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.jS9mXYXAeT customresourcedefinition.apiextensions.k8s.io "perconaservermongodbbackups.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbrestores.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbs.psmdb.percona.com" deleted + cat /tmp/tmp.XSUW9lHh1m + rm /tmp/tmp.jS9mXYXAeT /tmp/tmp.XSUW9lHh1m + return 0 ++ yq eval .metadata.name /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1612/deploy/crd.yaml ++ grep -v '\-\-\-' + for crd_name in '$(yq eval '\''.metadata.name'\'' "${src_dir}/deploy/crd.yaml" | grep -v '\''\-\-\-'\'')' + grep -v NAMESPACE + kubectl get perconaservermongodbbackups.psmdb.percona.com --all-namespaces -o wide + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbbackups.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: 
the server doesn't have a resource type "perconaservermongodbbackups" + kubectl patch perconaservermongodbbackups.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbbackups" + : + kubectl_bin wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.eSb2R9XUPF ++ mktemp + local LAST_ERR=/tmp/tmp.5Nnon06AYX + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.eSb2R9XUPF + cat /tmp/tmp.5Nnon06AYX + rm /tmp/tmp.eSb2R9XUPF /tmp/tmp.5Nnon06AYX + return 0 + for crd_name in '$(yq eval '\''.metadata.name'\'' "${src_dir}/deploy/crd.yaml" | grep -v '\''\-\-\-'\'')' + kubectl get perconaservermongodbrestores.psmdb.percona.com --all-namespaces -o wide + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbrestores.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' + grep -v NAMESPACE error: the server doesn't have a resource type "perconaservermongodbrestores" + kubectl patch perconaservermongodbrestores.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbrestores" + : + kubectl_bin wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.4H3DslFjw8 ++ mktemp + local LAST_ERR=/tmp/tmp.4zCSlJETzq + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.4H3DslFjw8 + cat /tmp/tmp.4zCSlJETzq + rm /tmp/tmp.4H3DslFjw8 /tmp/tmp.4zCSlJETzq + return 0 + for crd_name in '$(yq eval '\''.metadata.name'\'' "${src_dir}/deploy/crd.yaml" | grep -v '\''\-\-\-'\'')' + kubectl get perconaservermongodbs.psmdb.percona.com --all-namespaces -o wide + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbs.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' + grep -v NAMESPACE error: the server doesn't have a resource type "perconaservermongodbs" + kubectl patch perconaservermongodbs.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbs" + : + kubectl_bin wait --for=delete crd perconaservermongodbs.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.ufqRWnuRqK ++ mktemp + local LAST_ERR=/tmp/tmp.ho9wEiQ9gk + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=delete crd perconaservermongodbs.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.ufqRWnuRqK + cat /tmp/tmp.ho9wEiQ9gk + rm /tmp/tmp.ufqRWnuRqK /tmp/tmp.ho9wEiQ9gk + return 0 + local rbac_yaml=rbac.yaml + '[' -n psmdb-operator ']' + rbac_yaml=cw-rbac.yaml + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1612/deploy/cw-rbac.yaml --ignore-not-found ++ mktemp + local LAST_OUT=/tmp/tmp.iDojZ3AWhi ++ mktemp + local LAST_ERR=/tmp/tmp.pyBEAtXqRb + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1612/deploy/cw-rbac.yaml --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + 
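delete_crd has to cope with finalizers: it deletes the CRDs without waiting, lists any surviving custom resources per CRD, strips their finalizers with a merge patch so deletion can complete, tolerates "doesn't have a resource type" errors once a CRD is already gone (the bare ":" no-op after each error above), and only then waits for the CRD object itself to disappear. The xargs pipeline condenses to the following paraphrase of the trace ($src_dir again stands for the repo checkout):

    for crd_name in $(yq eval '.metadata.name' "$src_dir/deploy/crd.yaml" | grep -v '\-\-\-'); do
        # Leftover CRs arrive as "NAMESPACE NAME ..." rows (header filtered out);
        # xargs hands the first two fields of each row to the script as $0 and $1.
        kubectl get "$crd_name" --all-namespaces -o wide | grep -v NAMESPACE \
            | xargs -L 1 sh -xc "kubectl patch $crd_name -n \$0 \$1 --type=merge \
                -p '{\"metadata\":{\"finalizers\":[]}}'" || :
        kubectl wait --for=delete crd "$crd_name"
    done

The puzzling kubectl patch ... -n sh invocations above are a quirk of this pipeline: once the CRD is gone there are no input rows, GNU xargs still runs the script once with no arguments, $0 falls back to the shell's own name "sh", and that string lands in the -n flag; the resulting error is then swallowed by the no-op.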
+ local rbac_yaml=rbac.yaml
+ '[' -n psmdb-operator ']'
+ rbac_yaml=cw-rbac.yaml
+ kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1612/deploy/cw-rbac.yaml --ignore-not-found
++ mktemp
+ local LAST_OUT=/tmp/tmp.iDojZ3AWhi
++ mktemp
+ local LAST_ERR=/tmp/tmp.pyBEAtXqRb
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1612/deploy/cw-rbac.yaml --ignore-not-found
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.iDojZ3AWhi
clusterrole.rbac.authorization.k8s.io "percona-server-mongodb-operator" deleted
clusterrolebinding.rbac.authorization.k8s.io "service-account-percona-server-mongodb-operator" deleted
+ cat /tmp/tmp.pyBEAtXqRb
+ rm /tmp/tmp.iDojZ3AWhi /tmp/tmp.pyBEAtXqRb
+ return 0
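Every kubectl_bin call in this log expands to the same scaffolding: two mktemp capture files, up to three attempts, sleeps of 0 s, 4 s, and 8 s between failures (consistent with timeout * attempt for timeout=4), and a final cat/rm of the captured streams. A hedged reconstruction of that wrapper, inferred from the trace rather than copied from the test framework source:

    # Retry wrapper sketch: capture stdout/stderr, retry kubectl up to three
    # times with a growing back-off, then replay the captured output.
    kubectl_bin() {
        local LAST_OUT LAST_ERR exit_status=0 timeout=4
        LAST_OUT=$(mktemp)
        LAST_ERR=$(mktemp)
        for i in $(seq 0 2); do
            set +e
            kubectl "$@" 1>"$LAST_OUT" 2>"$LAST_ERR"
            exit_status=$?
            set -e
            if [ "$exit_status" != 0 -a -n "$i" ]; then
                cat "$LAST_OUT"
                cat "$LAST_ERR" >&2
                sleep $((timeout * i))   # 0 s, 4 s, 8 s across the three attempts
            else
                break
            fi
        done
        cat "$LAST_OUT"
        cat "$LAST_ERR" >&2
        rm "$LAST_OUT" "$LAST_ERR"
        return $exit_status
    }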
+ destroy_cert_manager
+ kubectl_bin delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml
++ mktemp
+ local LAST_OUT=/tmp/tmp.Pgr9quVW5r
++ mktemp
+ local LAST_ERR=/tmp/tmp.CRA8u6IDcU
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml
+ exit_status=1
+ set -e
+ '[' 1 '!=' 0 -a -n 1 ']'
+ cat /tmp/tmp.Pgr9quVW5r
+ cat /tmp/tmp.CRA8u6IDcU
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": namespaces "cert-manager" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificaterequests.cert-manager.io" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificates.cert-manager.io" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "challenges.acme.cert-manager.io" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "clusterissuers.cert-manager.io" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "issuers.cert-manager.io" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "orders.acme.cert-manager.io" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": serviceaccounts "cert-manager-cainjector" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": serviceaccounts "cert-manager" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": serviceaccounts "cert-manager-webhook" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cainjector" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-orders" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cluster-view" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-view" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-edit" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-cainjector" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-orders" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager:leaderelection" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager:leaderelection" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": services "cert-manager" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": services "cert-manager-webhook" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": deployments.apps "cert-manager-cainjector" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": deployments.apps "cert-manager" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": deployments.apps "cert-manager-webhook" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": mutatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": validatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found
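Every object in the manifest is already absent, so retrying this delete can never succeed and the wrapper burns all three attempts on identical output. Under plain kubectl semantics (an observation about the tool, not a change the framework actually makes), the NotFound churn would disappear by letting delete treat missing objects as success:

    # Idempotent teardown: missing objects are reported as success, so a
    # cluster that never had cert-manager installed exits 0 on the first try.
    kubectl delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml \
        --ignore-not-found=true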
"https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": mutatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": validatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found + sleep 0 + for i in '$(seq 0 2)' + set +e + kubectl delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.Pgr9quVW5r + cat /tmp/tmp.CRA8u6IDcU Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": namespaces "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificaterequests.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificates.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "challenges.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "clusterissuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "issuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "orders.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": serviceaccounts "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": serviceaccounts "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": serviceaccounts "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting 
"https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cluster-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-edit" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": 
clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": services "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": services "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": deployments.apps "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": deployments.apps "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": deployments.apps "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": mutatingwebhookconfigurations.admissionregistration.k8s.io 
"cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": validatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found + sleep 4 + for i in '$(seq 0 2)' + set +e + kubectl delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.Pgr9quVW5r + cat /tmp/tmp.CRA8u6IDcU Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": namespaces "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificaterequests.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificates.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "challenges.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "clusterissuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "issuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "orders.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": serviceaccounts "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": serviceaccounts "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": serviceaccounts "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificates" not 
found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cluster-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-edit" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting 
"https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": services "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": services "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": deployments.apps "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": deployments.apps "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": deployments.apps "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": mutatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found Error from server (NotFound): error when deleting 
"https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": validatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found + sleep 8 + cat /tmp/tmp.Pgr9quVW5r + cat /tmp/tmp.CRA8u6IDcU Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": namespaces "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificaterequests.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificates.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "challenges.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "clusterissuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "issuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "orders.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": serviceaccounts "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": serviceaccounts "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": serviceaccounts "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when 
deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cluster-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-edit" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": 
clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": services "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": services "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": deployments.apps "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": deployments.apps "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": deployments.apps "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": mutatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": validatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found + rm /tmp/tmp.Pgr9quVW5r /tmp/tmp.CRA8u6IDcU + return 1 + true + '[' -n '' ']' + '[' -n psmdb-operator ']' + rm -rf /tmp/tmp.azxQG9D5DO + kubectl_bin delete --grace-period=0 --force=true namespace 
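The `+ return 1` followed immediately by `+ true` implies the call site discards the teardown status, consistent with a pattern like the following (a hypothetical call site inferred from the trace, not the framework's verified source):

    # Cleanup is best-effort: a cluster that never had cert-manager installed
    # must not fail the test run.
    destroy_cert_manager || true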
+ kubectl_bin delete --grace-period=0 --force=true namespace expose-sharded-10972
++ mktemp
+ local LAST_OUT=/tmp/tmp.FdzxSUW95z
++ mktemp
+ local LAST_ERR=/tmp/tmp.gFGsZc7egI
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl delete --grace-period=0 --force=true namespace expose-sharded-10972
+ kubectl_bin delete --grace-period=0 --force=true namespace psmdb-operator
++ mktemp
+ local LAST_OUT=/tmp/tmp.DYpUFWm5lX
++ mktemp
+ local LAST_ERR=/tmp/tmp.XRMpfLyQU9
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl delete --grace-period=0 --force=true namespace psmdb-operator
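The two namespace deletions interleave in the trace, which suggests they are launched in the background and run concurrently. A sketch of that forced-teardown step (the loop and wait are illustrative; --grace-period=0 with --force skips graceful pod shutdown, which is acceptable for CI cleanup but risky on a shared or production cluster):

    # Tear down both test namespaces in parallel and wait for the deletes to
    # return; --ignore-not-found keeps a rerun from failing on a clean cluster.
    for ns in expose-sharded-10972 psmdb-operator; do
        kubectl delete namespace "$ns" --grace-period=0 --force=true --ignore-not-found &
    done
    wait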