Log: /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2109/e2e-tests/logs/cross-site-sharded.log grep: warning: stray \ before - Warning: version difference between client (1.34) and server (1.31) exceeds the supported minor version skew of +/-1 Warning: version difference between client (1.34) and server (1.31) exceeds the supported minor version skew of +/-1 Warning: version difference between client (1.34) and server (1.31) exceeds the supported minor version skew of +/-1 ++ get_mongod_ver_from_image perconalab/percona-server-mongodb-operator:main-mongod8.0 ++ local image=perconalab/percona-server-mongodb-operator:main-mongod8.0 +++ run_simple_cli_inside_image perconalab/percona-server-mongodb-operator:main-mongod8.0 'mongod --version' +++ local image=perconalab/percona-server-mongodb-operator:main-mongod8.0 +++ local 'cli=mongod --version' +++ local pod_name=3061 +++ /usr/sbin/sed -r 's/^.*db version v(([0-9]+\.){2}[0-9]+-[0-9]+).*$/\1/g' +++ kubectl_bin -n default run 3061 --image=perconalab/percona-server-mongodb-operator:main-mongod8.0 --restart=Never --command -- sleep infinity ++++ mktemp +++ local LAST_OUT=/tmp/tmp.RDtIYPTGDJ ++++ mktemp +++ local LAST_ERR=/tmp/tmp.Umy4N6IIvu +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl -n default run 3061 --image=perconalab/percona-server-mongodb-operator:main-mongod8.0 --restart=Never --command -- sleep infinity +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.RDtIYPTGDJ +++ cat /tmp/tmp.Umy4N6IIvu +++ rm /tmp/tmp.RDtIYPTGDJ /tmp/tmp.Umy4N6IIvu +++ return 0 +++ kubectl_bin -n default wait --for=condition=Ready pod/3061 ++++ mktemp +++ local LAST_OUT=/tmp/tmp.0e2VkEl0uA ++++ mktemp +++ local LAST_ERR=/tmp/tmp.gse3HgGkRd +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl -n default wait --for=condition=Ready pod/3061 +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.0e2VkEl0uA +++ cat /tmp/tmp.gse3HgGkRd +++ rm /tmp/tmp.0e2VkEl0uA /tmp/tmp.gse3HgGkRd +++ return 0 ++++ kubectl_bin -n default exec 3061 -- bash -c 'mongod --version 2>&1' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.BdNlnWVeer +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.GukizKSr4R ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl -n default exec 3061 -- bash -c 'mongod --version 2>&1' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.BdNlnWVeer ++++ cat /tmp/tmp.GukizKSr4R ++++ rm /tmp/tmp.BdNlnWVeer /tmp/tmp.GukizKSr4R ++++ return 0 +++ local 'output=db version v8.0.12-4 Build Info: { "version": "8.0.12-4", "gitVersion": "d635038667c5f80ce2d641ab24a3c56810c8bbb3", "openSSLVersion": "OpenSSL 3.2.2 4 Jun 2024", "modules": [], "proFeatures": [], "allocator": "tcmalloc-google", "environment": { "distarch": "x86_64", "target_arch": "x86_64" } }' +++ kubectl_bin -n default delete pod/3061 --grace-period=0 --force ++++ mktemp +++ local LAST_OUT=/tmp/tmp.ZtkjcBrJgH ++++ mktemp +++ local LAST_ERR=/tmp/tmp.5DXYLxTglh +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl -n default delete pod/3061 --grace-period=0 --force +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.ZtkjcBrJgH +++ cat /tmp/tmp.5DXYLxTglh Warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. 
The resource may continue to run on the cluster indefinitely. +++ rm /tmp/tmp.ZtkjcBrJgH /tmp/tmp.5DXYLxTglh +++ return 0 +++ echo db version v8.0.12-4 Build Info: '{' '"version":' '"8.0.12-4",' '"gitVersion":' '"d635038667c5f80ce2d641ab24a3c56810c8bbb3",' '"openSSLVersion":' '"OpenSSL' 3.2.2 4 Jun '2024",' '"modules":' '[],' '"proFeatures":' '[],' '"allocator":' '"tcmalloc-google",' '"environment":' '{' '"distarch":' '"x86_64",' '"target_arch":' '"x86_64"' '}' '}' ++ version_info=8.0.12-4 ++ [[ ! 8.0.12-4 =~ ^([0-9]+\.){2}[0-9]+-[0-9]+$ ]] ++ echo 8.0.12-4 + FULL_VER=8.0.12-4 + MONGO_VER=8.0 + unset OPERATOR_NS + main_cluster=cross-site-sharded-main + replica_cluster=cross-site-sharded-replica + desc 'create main cluster' + set +o xtrace ----------------------------------------------------------------------------------- create main cluster ----------------------------------------------------------------------------------- + create_infra cross-site-sharded-5777 + local ns=cross-site-sharded-5777 + [[ 1 == 1 ]] + delete_crd + desc 'get and delete old CRDs and RBAC' + set +o xtrace ----------------------------------------------------------------------------------- get and delete old CRDs and RBAC ----------------------------------------------------------------------------------- + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2109/deploy/crd.yaml --ignore-not-found --wait=false ++ mktemp + local LAST_OUT=/tmp/tmp.jcwUEjzcGg ++ mktemp + local LAST_ERR=/tmp/tmp.fUYt8mq9To + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2109/deploy/crd.yaml --ignore-not-found --wait=false + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.jcwUEjzcGg customresourcedefinition.apiextensions.k8s.io "perconaservermongodbbackups.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbrestores.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbs.psmdb.percona.com" deleted + cat /tmp/tmp.fUYt8mq9To + rm /tmp/tmp.jcwUEjzcGg /tmp/tmp.fUYt8mq9To + return 0 ++ yq eval .metadata.name /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2109/deploy/crd.yaml ++ grep -v '\-\-\-' grep: warning: stray \ before - grep: warning: stray \ before - + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + kubectl get perconaservermongodbbackups.psmdb.percona.com --all-namespaces -o wide + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbbackups.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' + grep -v NAMESPACE error: the server doesn't have a resource type "perconaservermongodbbackups" + kubectl patch perconaservermongodbbackups.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbbackups" + : + kubectl_bin wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.4HfwOh5qo3 ++ mktemp + local LAST_ERR=/tmp/tmp.DIwJjkcodE + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.4HfwOh5qo3 + cat /tmp/tmp.DIwJjkcodE + rm /tmp/tmp.4HfwOh5qo3 /tmp/tmp.DIwJjkcodE + return 0 + for crd_name in $(yq eval '.metadata.name' 
"${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + kubectl get perconaservermongodbrestores.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbrestores.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbrestores" + kubectl patch perconaservermongodbrestores.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbrestores" + : + kubectl_bin wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.sni8xsTgyB ++ mktemp + local LAST_ERR=/tmp/tmp.WxjzSVbaEK + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.sni8xsTgyB + cat /tmp/tmp.WxjzSVbaEK + rm /tmp/tmp.sni8xsTgyB /tmp/tmp.WxjzSVbaEK + return 0 + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + kubectl get perconaservermongodbs.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbs.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbs" + kubectl patch perconaservermongodbs.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbs" + : + kubectl_bin wait --for=delete crd perconaservermongodbs.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.D8cwhUnAOp ++ mktemp + local LAST_ERR=/tmp/tmp.jqwwXIejJT + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbs.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.D8cwhUnAOp + cat /tmp/tmp.jqwwXIejJT + rm /tmp/tmp.D8cwhUnAOp /tmp/tmp.jqwwXIejJT + return 0 + local rbac_yaml=rbac.yaml + '[' -n '' ']' + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2109/deploy/rbac.yaml --ignore-not-found ++ mktemp + local LAST_OUT=/tmp/tmp.ZpEYOZp0if ++ mktemp + local LAST_ERR=/tmp/tmp.HkoRP0trBe + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2109/deploy/rbac.yaml --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.ZpEYOZp0if role.rbac.authorization.k8s.io "percona-server-mongodb-operator" deleted from cross-site-sharded-9050 namespace serviceaccount "percona-server-mongodb-operator" deleted from cross-site-sharded-9050 namespace rolebinding.rbac.authorization.k8s.io "service-account-percona-server-mongodb-operator" deleted from cross-site-sharded-9050 namespace + cat /tmp/tmp.HkoRP0trBe + rm /tmp/tmp.ZpEYOZp0if /tmp/tmp.HkoRP0trBe + return 0 + check_crd_for_deletion PR-2109-03bacb5d + local git_tag=PR-2109-03bacb5d ++ curl -s https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/PR-2109-03bacb5d/deploy/crd.yaml ++ /usr/sbin/sed s/---//g ++ yq eval .metadata.name ++ /usr/sbin/sed ':a;N;$!ba;s/\n/ /g' + for crd_name in $(curl -s https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/${git_tag}/deploy/crd.yaml | yq eval '.metadata.name' 
| $sed 's/---//g' | $sed ':a;N;$!ba;s/\n/ /g') ++ kubectl_bin get crd/null -o 'jsonpath={.status.conditions[-1].type}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.vfjyLJ7v93 +++ mktemp ++ local LAST_ERR=/tmp/tmp.WAzfeQ4iI2 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}' ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.vfjyLJ7v93 ++ cat /tmp/tmp.WAzfeQ4iI2 Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ sleep 0 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}' ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.vfjyLJ7v93 ++ cat /tmp/tmp.WAzfeQ4iI2 Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ sleep 4 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}' ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.vfjyLJ7v93 ++ cat /tmp/tmp.WAzfeQ4iI2 Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ sleep 8 ++ cat /tmp/tmp.vfjyLJ7v93 ++ cat /tmp/tmp.WAzfeQ4iI2 Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ rm /tmp/tmp.vfjyLJ7v93 /tmp/tmp.WAzfeQ4iI2 ++ return 1 + [[ '' == \T\e\r\m\i\n\a\t\i\n\g ]] + '[' -n '' ']' + create_namespace cross-site-sharded-5777 + local namespace=cross-site-sharded-5777 + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + destroy_chaos_mesh ++ helm list --all-namespaces --filter chaos-mesh ++ tail -n1 ++ awk '-F ' '{print $2}' ++ sed s/NAMESPACE// + local chaos_mesh_ns= + desc 'destroy chaos-mesh' + set +o xtrace ----------------------------------------------------------------------------------- destroy chaos-mesh ----------------------------------------------------------------------------------- + '[' -n '' ']' ++ kubectl get MutatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete MutatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep validate-auth ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl api-resources ++ grep chaos-mesh ++ awk '{print $1}' ++ kubectl get crd ++ grep chaos-mesh.org ++ awk '{print $1}' + timeout 30 kubectl delete crd error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrolebinding ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrolebinding error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrole ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrole error: resource(s) were provided, but no name was specified + : + desc 'cleaned up all old namespaces' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up all old namespaces ----------------------------------------------------------------------------------- + kubectl_bin get ns + egrep -v 
'^kube-|^default|Terminating|psmdb-operator|openshift|^gke-|^gmp-|^NAME' + awk '{print$1}' + '[' -n '' ']' + desc 'cleaned up old namespaces cross-site-sharded-5777' + xargs kubectl delete ns + set +o xtrace ----------------------------------------------------------------------------------- cleaned up old namespaces cross-site-sharded-5777 ----------------------------------------------------------------------------------- + kubectl_bin delete namespace cross-site-sharded-5777 --ignore-not-found ++ mktemp ++ mktemp egrep: warning: egrep is obsolescent; using grep -E + local LAST_OUT=/tmp/tmp.Nj32etNzQY ++ mktemp + local LAST_OUT=/tmp/tmp.V1VAqP7ROD ++ mktemp + local LAST_ERR=/tmp/tmp.pImKNc6D9z + local exit_status=0 + local timeout=4 + local LAST_ERR=/tmp/tmp.Cz8RQ8yx7y + local exit_status=0 + local timeout=4 ++ seq 0 2 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get ns + for i in $(seq 0 2) + set +e + kubectl delete namespace cross-site-sharded-5777 --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Nj32etNzQY + cat /tmp/tmp.pImKNc6D9z + rm /tmp/tmp.Nj32etNzQY /tmp/tmp.pImKNc6D9z + return 0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.V1VAqP7ROD + cat /tmp/tmp.Cz8RQ8yx7y + rm /tmp/tmp.V1VAqP7ROD /tmp/tmp.Cz8RQ8yx7y + return 0 + kubectl_bin wait --for=delete namespace cross-site-sharded-5777 ++ mktemp + local LAST_OUT=/tmp/tmp.U28KuMTHhl ++ mktemp + local LAST_ERR=/tmp/tmp.7vyyMnxJ7o + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete namespace cross-site-sharded-5777 namespace "cross-site-sharded-9050" deleted namespace "cross-site-sharded-replica-6675" deleted + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.U28KuMTHhl + cat /tmp/tmp.7vyyMnxJ7o + rm /tmp/tmp.U28KuMTHhl /tmp/tmp.7vyyMnxJ7o + return 0 + desc 'create namespace cross-site-sharded-5777' + set +o xtrace ----------------------------------------------------------------------------------- create namespace cross-site-sharded-5777 ----------------------------------------------------------------------------------- + kubectl_bin create namespace cross-site-sharded-5777 ++ mktemp + local LAST_OUT=/tmp/tmp.DYAJgOMPap ++ mktemp + local LAST_ERR=/tmp/tmp.vZ2fPz0vEK + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl create namespace cross-site-sharded-5777 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.DYAJgOMPap namespace/cross-site-sharded-5777 created + cat /tmp/tmp.vZ2fPz0vEK + rm /tmp/tmp.DYAJgOMPap /tmp/tmp.vZ2fPz0vEK + return 0 + set_kube_ctx cross-site-sharded-5777 + local namespace=cross-site-sharded-5777 ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.KtRQvtd8DP +++ mktemp ++ local LAST_ERR=/tmp/tmp.ZWG1wpvTGA ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.KtRQvtd8DP ++ cat /tmp/tmp.ZWG1wpvTGA ++ rm /tmp/tmp.KtRQvtd8DP /tmp/tmp.ZWG1wpvTGA ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-2109-03bacb5d-1-cluster3 --namespace=cross-site-sharded-5777 ++ mktemp + local LAST_OUT=/tmp/tmp.vBCLvhftBL ++ mktemp + local LAST_ERR=/tmp/tmp.ebvY3x9ysp + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl config set-context 
gke_cloud-dev-112233_us-central1-a_jen-psmdb-2109-03bacb5d-1-cluster3 --namespace=cross-site-sharded-5777 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.vBCLvhftBL Context "gke_cloud-dev-112233_us-central1-a_jen-psmdb-2109-03bacb5d-1-cluster3" modified. + cat /tmp/tmp.ebvY3x9ysp + rm /tmp/tmp.vBCLvhftBL /tmp/tmp.ebvY3x9ysp + return 0 + deploy_operator + desc 'start PSMDB operator: perconalab/percona-server-mongodb-operator:PR-2109-03bacb5d' + set +o xtrace ----------------------------------------------------------------------------------- start PSMDB operator: perconalab/percona-server-mongodb-operator:PR-2109-03bacb5d ----------------------------------------------------------------------------------- + local cr_file + '[' -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2109/e2e-tests/cross-site-sharded/conf/crd.yaml ']' + cr_file=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2109/deploy/crd.yaml + kubectl_bin apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2109/deploy/crd.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.oe5r4UlKWL ++ mktemp + local LAST_ERR=/tmp/tmp.vhpjQXxNPO + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2109/deploy/crd.yaml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.oe5r4UlKWL customresourcedefinition.apiextensions.k8s.io/perconaservermongodbbackups.psmdb.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaservermongodbrestores.psmdb.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaservermongodbs.psmdb.percona.com serverside-applied + cat /tmp/tmp.vhpjQXxNPO + rm /tmp/tmp.oe5r4UlKWL /tmp/tmp.vhpjQXxNPO + return 0 + '[' -n '' ']' + apply_rbac rbac + local operator_namespace=psmdb-operator + local rbac=rbac + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2109/deploy/rbac.yaml + sed -e 's^namespace: .*^namespace: psmdb-operator^' + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.g6ZzCBmYoj ++ mktemp + local LAST_ERR=/tmp/tmp.r79aRS9L1a + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.g6ZzCBmYoj role.rbac.authorization.k8s.io/percona-server-mongodb-operator created serviceaccount/percona-server-mongodb-operator created rolebinding.rbac.authorization.k8s.io/service-account-percona-server-mongodb-operator created + cat /tmp/tmp.r79aRS9L1a + rm /tmp/tmp.g6ZzCBmYoj /tmp/tmp.r79aRS9L1a + return 0 + yq eval ' (.spec.template.spec.containers[].image = "perconalab/percona-server-mongodb-operator:PR-2109-03bacb5d") | ((.. | select(.[] == "DISABLE_TELEMETRY")) |= .value="true") | ((.. 
| select(.[] == "LOG_LEVEL")) |= .value="DEBUG")' /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2109/deploy/operator.yaml + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.JgonleH4nm ++ mktemp + local LAST_ERR=/tmp/tmp.v5NztTAgAA + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.JgonleH4nm deployment.apps/percona-server-mongodb-operator created + cat /tmp/tmp.v5NztTAgAA + rm /tmp/tmp.JgonleH4nm /tmp/tmp.v5NztTAgAA + return 0 + sleep 20 ++ get_operator_pod ++ kubectl_bin get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.fmDeuPXkOL +++ mktemp ++ local LAST_ERR=/tmp/tmp.BiA5Imlavm ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.fmDeuPXkOL ++ cat /tmp/tmp.BiA5Imlavm ++ rm /tmp/tmp.fmDeuPXkOL /tmp/tmp.BiA5Imlavm ++ return 0 + wait_operator_pod percona-server-mongodb-operator-6c964988db-5f6wk + local pod=percona-server-mongodb-operator-6c964988db-5f6wk + set +o xtrace waiting for pod/percona-server-mongodb-operator-6c964988db-5f6wk to be ready.OK + echo 'Print operator info from log' Print operator info from log + grep 'Manager starting up' ++ get_operator_pod ++ kubectl_bin get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.FgVgo3bmno +++ mktemp ++ local LAST_ERR=/tmp/tmp.MEINJs2Ycc ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.FgVgo3bmno ++ cat /tmp/tmp.MEINJs2Ycc ++ rm /tmp/tmp.FgVgo3bmno /tmp/tmp.MEINJs2Ycc ++ return 0 + kubectl_bin logs percona-server-mongodb-operator-6c964988db-5f6wk ++ mktemp + local LAST_OUT=/tmp/tmp.D1c1YFBwMD ++ mktemp + local LAST_ERR=/tmp/tmp.v38wIb0uta + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl logs percona-server-mongodb-operator-6c964988db-5f6wk + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.D1c1YFBwMD + cat /tmp/tmp.v38wIb0uta + rm /tmp/tmp.D1c1YFBwMD /tmp/tmp.v38wIb0uta + return 0 2025-11-10T01:44:59.542Z INFO setup Manager starting up {"gitCommit": "03bacb5d8b80cb442864368f4751bd8590422177", "gitBranch": "PR-2109-03bacb5d", "buildTime": "", "goVersion": "go1.25.4", "os": "linux", "arch": "amd64"} + desc 'create secrets and start client' + set +o xtrace ----------------------------------------------------------------------------------- create secrets and start client ----------------------------------------------------------------------------------- + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2109/e2e-tests/conf/client.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2109/e2e-tests/cross-site-sharded/conf/secrets.yml ++ mktemp + local LAST_OUT=/tmp/tmp.JmzzznCy09 ++ mktemp + local LAST_ERR=/tmp/tmp.Ox1oNzehjQ + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2109/e2e-tests/conf/client.yml -f 
/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2109/e2e-tests/cross-site-sharded/conf/secrets.yml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.JmzzznCy09 deployment.apps/psmdb-client created secret/cross-site-sharded-main-secrets created secret/cross-site-sharded-main-ssl created secret/cross-site-sharded-main-ssl-internal created + cat /tmp/tmp.Ox1oNzehjQ + rm /tmp/tmp.JmzzznCy09 /tmp/tmp.Ox1oNzehjQ + return 0 + desc 'create main PSMDB cluster cross-site-sharded-main.' + set +o xtrace ----------------------------------------------------------------------------------- create main PSMDB cluster cross-site-sharded-main. ----------------------------------------------------------------------------------- + apply_cluster /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2109/e2e-tests/cross-site-sharded/conf/cross-site-sharded-main.yml + '[' -z '' ']' + cat_config /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2109/e2e-tests/cross-site-sharded/conf/cross-site-sharded-main.yml + kubectl_bin apply -f - + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2109/e2e-tests/cross-site-sharded/conf/cross-site-sharded-main.yml ++ mktemp + yq eval '(.spec | select(.image == null)).image = "perconalab/percona-server-mongodb-operator:main-mongod8.0"' + yq eval '(.spec | select(has("pmm"))).pmm.image = "percona/pmm-client:2.44.1-1"' + yq eval '(.spec | select(has("initImage"))).initImage = "perconalab/percona-server-mongodb-operator:PR-2109-03bacb5d"' + yq eval '(.spec | select(has("backup"))).backup.image = "perconalab/percona-server-mongodb-operator:main-backup"' + yq eval '.spec.upgradeOptions.apply="Never"' + local LAST_OUT=/tmp/tmp.9wBh4sQdhR ++ mktemp + local LAST_ERR=/tmp/tmp.M7su3V51r7 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.9wBh4sQdhR perconaservermongodb.psmdb.percona.com/cross-site-sharded-main created + cat /tmp/tmp.M7su3V51r7 + rm /tmp/tmp.9wBh4sQdhR /tmp/tmp.M7su3V51r7 + return 0 + desc 'check if all 3 Pods started' + set +o xtrace ----------------------------------------------------------------------------------- check if all 3 Pods started ----------------------------------------------------------------------------------- + wait_for_running cross-site-sharded-main-rs0 3 + local name=cross-site-sharded-main-rs0 + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=cross-site-sharded-main ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod cross-site-sharded-main-rs0-0 + local pod=cross-site-sharded-main-rs0-0 + set +o xtrace waiting for pod/cross-site-sharded-main-rs0-0 to be ready...........OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod cross-site-sharded-main-rs0-1 + local pod=cross-site-sharded-main-rs0-1 + set +o xtrace waiting for pod/cross-site-sharded-main-rs0-1 to be ready.............OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb cross-site-sharded-main -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.2PPKxVKP3b +++ mktemp ++ local LAST_ERR=/tmp/tmp.k6KyBAliMU ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb cross-site-sharded-main -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat 
/tmp/tmp.2PPKxVKP3b ++ cat /tmp/tmp.k6KyBAliMU ++ rm /tmp/tmp.2PPKxVKP3b /tmp/tmp.k6KyBAliMU ++ return 0 + [[ false == \t\r\u\e ]] + wait_pod cross-site-sharded-main-rs0-2 + local pod=cross-site-sharded-main-rs0-2 + set +o xtrace waiting for pod/cross-site-sharded-main-rs0-2 to be ready............OK ++ kubectl_bin get psmdb cross-site-sharded-main -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.VeADsGqo14 +++ mktemp ++ local LAST_ERR=/tmp/tmp.DY1CdnFCYp ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb cross-site-sharded-main -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.VeADsGqo14 ++ cat /tmp/tmp.DY1CdnFCYp ++ rm /tmp/tmp.VeADsGqo14 /tmp/tmp.DY1CdnFCYp ++ return 0 + [[ false == \t\r\u\e ]] ++ kubectl_bin get psmdb cross-site-sharded-main -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.X7Kh7G4Nwl +++ mktemp ++ local LAST_ERR=/tmp/tmp.JZXApR9yic ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb cross-site-sharded-main -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.X7Kh7G4Nwl ++ cat /tmp/tmp.JZXApR9yic ++ rm /tmp/tmp.X7Kh7G4Nwl /tmp/tmp.JZXApR9yic ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness......................................... + wait_for_running cross-site-sharded-main-cfg 3 false + local name=cross-site-sharded-main-cfg + let last_pod=2 + local check_cluster_readyness=false + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=cfg + local cluster_name=cross-site-sharded-main ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod cross-site-sharded-main-cfg-0 + local pod=cross-site-sharded-main-cfg-0 + set +o xtrace waiting for pod/cross-site-sharded-main-cfg-0 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod cross-site-sharded-main-cfg-1 + local pod=cross-site-sharded-main-cfg-1 + set +o xtrace waiting for pod/cross-site-sharded-main-cfg-1 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb cross-site-sharded-main -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.u7JSeh4uj3 +++ mktemp ++ local LAST_ERR=/tmp/tmp.jGiL93rVDL ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb cross-site-sharded-main -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.u7JSeh4uj3 ++ cat /tmp/tmp.jGiL93rVDL ++ rm /tmp/tmp.u7JSeh4uj3 /tmp/tmp.jGiL93rVDL ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod cross-site-sharded-main-cfg-2 + local pod=cross-site-sharded-main-cfg-2 + set +o xtrace waiting for pod/cross-site-sharded-main-cfg-2 to be ready.OK ++ kubectl_bin get psmdb cross-site-sharded-main -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.rq4MZwK8Nv +++ mktemp ++ local LAST_ERR=/tmp/tmp.fyvMBAbIdr ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb cross-site-sharded-main -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].nonvoting.enabled}' ++ 
exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.rq4MZwK8Nv ++ cat /tmp/tmp.fyvMBAbIdr ++ rm /tmp/tmp.rq4MZwK8Nv /tmp/tmp.fyvMBAbIdr ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb cross-site-sharded-main -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.GKfK0aV7yK +++ mktemp ++ local LAST_ERR=/tmp/tmp.Up3TMvQwd3 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb cross-site-sharded-main -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.GKfK0aV7yK ++ cat /tmp/tmp.Up3TMvQwd3 ++ rm /tmp/tmp.GKfK0aV7yK /tmp/tmp.Up3TMvQwd3 ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ false == \t\r\u\e ]] + desc 'create user' + set +o xtrace ----------------------------------------------------------------------------------- create user ----------------------------------------------------------------------------------- + run_mongos 'db.createUser({user:"user",pwd:"pass",roles:[{db:"app",role:"readWrite"}]})' userAdmin:userAdmin123456@cross-site-sharded-main-mongos.cross-site-sharded-5777 + local 'command=db.createUser({user:"user",pwd:"pass",roles:[{db:"app",role:"readWrite"}]})' + local uri=userAdmin:userAdmin123456@cross-site-sharded-main-mongos.cross-site-sharded-5777 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local port=27017 + local mongo_bin=mongo ++ echo .svc.cluster.local ++ awk -F: '{print $2}' + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.RvTnbPzBMH +++ mktemp ++ local LAST_ERR=/tmp/tmp.1yJ7Ec3gOr ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.RvTnbPzBMH ++ cat /tmp/tmp.1yJ7Ec3gOr ++ rm /tmp/tmp.RvTnbPzBMH /tmp/tmp.1yJ7Ec3gOr ++ return 0 + local client_container=psmdb-client-696897d69b-pjkvq + kubectl_bin exec psmdb-client-696897d69b-pjkvq -- bash -c 'printf '\''db.createUser({user:"user",pwd:"pass",roles:[{db:"app",role:"readWrite"}]})\n'\'' | mongo mongodb://userAdmin:userAdmin123456@cross-site-sharded-main-mongos.cross-site-sharded-5777.svc.cluster.local:27017/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.RvRoWZDELL ++ mktemp + local LAST_ERR=/tmp/tmp.vkUkrZgMV8 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-pjkvq -- bash -c 'printf '\''db.createUser({user:"user",pwd:"pass",roles:[{db:"app",role:"readWrite"}]})\n'\'' | mongo mongodb://userAdmin:userAdmin123456@cross-site-sharded-main-mongos.cross-site-sharded-5777.svc.cluster.local:27017/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.RvRoWZDELL Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://cross-site-sharded-main-mongos.cross-site-sharded-5777.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb Implicit session: session { "id" : UUID("6b31bc54-769f-46b6-b198-ebce85432834") } Percona Server for MongoDB server version: v8.0.12-4 WARNING: shell and server versions do not match Successfully added user: { "user" : "user", "roles" : [ { "db" : "app", "role" : 
"readWrite" } ] } bye + cat /tmp/tmp.vkUkrZgMV8 + rm /tmp/tmp.RvRoWZDELL /tmp/tmp.vkUkrZgMV8 + return 0 + sleep 2 + desc 'set chunk size to 2 MB' + set +o xtrace ----------------------------------------------------------------------------------- set chunk size to 2 MB ----------------------------------------------------------------------------------- + run_mongos 'use config\n db.settings.save( { _id:"chunksize", value: 2 } )' clusterAdmin:clusterAdmin123456@cross-site-sharded-main-mongos.cross-site-sharded-5777 + local 'command=use config\n db.settings.save( { _id:"chunksize", value: 2 } )' + local uri=clusterAdmin:clusterAdmin123456@cross-site-sharded-main-mongos.cross-site-sharded-5777 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local port=27017 + local mongo_bin=mongo ++ echo .svc.cluster.local ++ awk -F: '{print $2}' + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.IwTmctCXxr +++ mktemp ++ local LAST_ERR=/tmp/tmp.xmTNagHtp3 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.IwTmctCXxr ++ cat /tmp/tmp.xmTNagHtp3 ++ rm /tmp/tmp.IwTmctCXxr /tmp/tmp.xmTNagHtp3 ++ return 0 + local client_container=psmdb-client-696897d69b-pjkvq + kubectl_bin exec psmdb-client-696897d69b-pjkvq -- bash -c 'printf '\''use config\n db.settings.save( { _id:"chunksize", value: 2 } )\n'\'' | mongo mongodb://clusterAdmin:clusterAdmin123456@cross-site-sharded-main-mongos.cross-site-sharded-5777.svc.cluster.local:27017/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.D2xFfYJIbL ++ mktemp + local LAST_ERR=/tmp/tmp.LMNY13UemS + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-pjkvq -- bash -c 'printf '\''use config\n db.settings.save( { _id:"chunksize", value: 2 } )\n'\'' | mongo mongodb://clusterAdmin:clusterAdmin123456@cross-site-sharded-main-mongos.cross-site-sharded-5777.svc.cluster.local:27017/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.D2xFfYJIbL Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://cross-site-sharded-main-mongos.cross-site-sharded-5777.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb Implicit session: session { "id" : UUID("d6365663-dd22-4e50-85a8-c1d45d532652") } Percona Server for MongoDB server version: v8.0.12-4 WARNING: shell and server versions do not match switched to db config WriteResult({ "nMatched" : 0, "nUpserted" : 1, "nModified" : 0, "_id" : "chunksize" }) bye + cat /tmp/tmp.LMNY13UemS + rm /tmp/tmp.D2xFfYJIbL /tmp/tmp.LMNY13UemS + return 0 + sleep 2 + desc 'write data' + set +o xtrace ----------------------------------------------------------------------------------- write data ----------------------------------------------------------------------------------- + run_script_mongos /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2109/e2e-tests/cross-site-sharded/data.js user:pass@cross-site-sharded-main-mongos.cross-site-sharded-5777 + local script=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2109/e2e-tests/cross-site-sharded/data.js + local uri=user:pass@cross-site-sharded-main-mongos.cross-site-sharded-5777 + local driver=mongodb + local 
suffix=.svc.cluster.local + local mongo_flag= + local mongo_bin=mongo ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.9Sz3ARl1yG +++ mktemp ++ local LAST_ERR=/tmp/tmp.9RkND5p5ny ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.9Sz3ARl1yG ++ cat /tmp/tmp.9RkND5p5ny ++ rm /tmp/tmp.9Sz3ARl1yG /tmp/tmp.9RkND5p5ny ++ return 0 + local client_container=psmdb-client-696897d69b-pjkvq ++ basename /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2109/e2e-tests/cross-site-sharded/data.js + name=data.js + kubectl_bin cp /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2109/e2e-tests/cross-site-sharded/data.js cross-site-sharded-5777/psmdb-client-696897d69b-pjkvq:/tmp ++ mktemp + local LAST_OUT=/tmp/tmp.D3Lh3IhOIM ++ mktemp + local LAST_ERR=/tmp/tmp.xntfWfTnpi + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl cp /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2109/e2e-tests/cross-site-sharded/data.js cross-site-sharded-5777/psmdb-client-696897d69b-pjkvq:/tmp + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.D3Lh3IhOIM + cat /tmp/tmp.xntfWfTnpi + rm /tmp/tmp.D3Lh3IhOIM /tmp/tmp.xntfWfTnpi + return 0 + kubectl_bin exec psmdb-client-696897d69b-pjkvq -- bash -c 'mongo mongodb://user:pass@cross-site-sharded-main-mongos.cross-site-sharded-5777.svc.cluster.local/admin /tmp/data.js' ++ mktemp + local LAST_OUT=/tmp/tmp.tsIPyODVge ++ mktemp + local LAST_ERR=/tmp/tmp.7ZFFqbnzPI + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-pjkvq -- bash -c 'mongo mongodb://user:pass@cross-site-sharded-main-mongos.cross-site-sharded-5777.svc.cluster.local/admin /tmp/data.js' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.tsIPyODVge Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://cross-site-sharded-main-mongos.cross-site-sharded-5777.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb Implicit session: session { "id" : UUID("6f6fa74e-de28-4252-a8d4-f0b9878f0885") } Percona Server for MongoDB server version: v8.0.12-4 WARNING: shell and server versions do not match + cat /tmp/tmp.7ZFFqbnzPI + rm /tmp/tmp.tsIPyODVge /tmp/tmp.7ZFFqbnzPI + return 0 + desc 'shard collection' + set +o xtrace ----------------------------------------------------------------------------------- shard collection ----------------------------------------------------------------------------------- + run_mongos 'sh.enableSharding("app")' clusterAdmin:clusterAdmin123456@cross-site-sharded-main-mongos.cross-site-sharded-5777 + local 'command=sh.enableSharding("app")' + local uri=clusterAdmin:clusterAdmin123456@cross-site-sharded-main-mongos.cross-site-sharded-5777 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local port=27017 + local mongo_bin=mongo ++ echo .svc.cluster.local ++ awk -F: '{print $2}' + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.j95gfSmu8K +++ mktemp ++ local LAST_ERR=/tmp/tmp.rNxECkeUZe ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ 
kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.j95gfSmu8K ++ cat /tmp/tmp.rNxECkeUZe ++ rm /tmp/tmp.j95gfSmu8K /tmp/tmp.rNxECkeUZe ++ return 0 + local client_container=psmdb-client-696897d69b-pjkvq + kubectl_bin exec psmdb-client-696897d69b-pjkvq -- bash -c 'printf '\''sh.enableSharding("app")\n'\'' | mongo mongodb://clusterAdmin:clusterAdmin123456@cross-site-sharded-main-mongos.cross-site-sharded-5777.svc.cluster.local:27017/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.cxUBqpXWRw ++ mktemp + local LAST_ERR=/tmp/tmp.EYIvPKsNEa + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-pjkvq -- bash -c 'printf '\''sh.enableSharding("app")\n'\'' | mongo mongodb://clusterAdmin:clusterAdmin123456@cross-site-sharded-main-mongos.cross-site-sharded-5777.svc.cluster.local:27017/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.cxUBqpXWRw Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://cross-site-sharded-main-mongos.cross-site-sharded-5777.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb Implicit session: session { "id" : UUID("4116fc76-f1f3-423e-bf46-f61ebc030d99") } Percona Server for MongoDB server version: v8.0.12-4 WARNING: shell and server versions do not match { "ok" : 1, "$clusterTime" : { "clusterTime" : Timestamp(1762739366, 1), "signature" : { "hash" : BinData(0,"XkP0VcgFJqQx43O6k1mmaazH7jw="), "keyId" : NumberLong("7570907206787268620") } }, "operationTime" : Timestamp(1762739366, 1) } bye + cat /tmp/tmp.EYIvPKsNEa + rm /tmp/tmp.cxUBqpXWRw /tmp/tmp.EYIvPKsNEa + return 0 + sleep 2 + run_mongos 'sh.shardCollection("app.city", { _id: 1 } )' clusterAdmin:clusterAdmin123456@cross-site-sharded-main-mongos.cross-site-sharded-5777 + local 'command=sh.shardCollection("app.city", { _id: 1 } )' + local uri=clusterAdmin:clusterAdmin123456@cross-site-sharded-main-mongos.cross-site-sharded-5777 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local port=27017 + local mongo_bin=mongo ++ echo .svc.cluster.local ++ awk -F: '{print $2}' + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.XsKZjGxaVr +++ mktemp ++ local LAST_ERR=/tmp/tmp.jF7U0KKVOS ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.XsKZjGxaVr ++ cat /tmp/tmp.jF7U0KKVOS ++ rm /tmp/tmp.XsKZjGxaVr /tmp/tmp.jF7U0KKVOS ++ return 0 + local client_container=psmdb-client-696897d69b-pjkvq + kubectl_bin exec psmdb-client-696897d69b-pjkvq -- bash -c 'printf '\''sh.shardCollection("app.city", { _id: 1 } )\n'\'' | mongo mongodb://clusterAdmin:clusterAdmin123456@cross-site-sharded-main-mongos.cross-site-sharded-5777.svc.cluster.local:27017/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.0amdRqpupE ++ mktemp + local LAST_ERR=/tmp/tmp.yyLaQ6ClDt + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-pjkvq -- bash -c 'printf '\''sh.shardCollection("app.city", { _id: 1 } )\n'\'' | mongo 
mongodb://clusterAdmin:clusterAdmin123456@cross-site-sharded-main-mongos.cross-site-sharded-5777.svc.cluster.local:27017/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.0amdRqpupE Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://cross-site-sharded-main-mongos.cross-site-sharded-5777.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb Implicit session: session { "id" : UUID("dc064231-767b-4640-951d-f36bfe775edc") } Percona Server for MongoDB server version: v8.0.12-4 WARNING: shell and server versions do not match { "collectionsharded" : "app.city", "ok" : 1, "$clusterTime" : { "clusterTime" : Timestamp(1762739370, 42), "signature" : { "hash" : BinData(0,"piCp9IjoKFe2tH2bfSD07J+n/vY="), "keyId" : NumberLong("7570907206787268620") } }, "operationTime" : Timestamp(1762739370, 41) } bye + cat /tmp/tmp.yyLaQ6ClDt + rm /tmp/tmp.0amdRqpupE /tmp/tmp.yyLaQ6ClDt + return 0 + sleep 120 + desc 'Check chunks' + set +o xtrace ----------------------------------------------------------------------------------- Check chunks ----------------------------------------------------------------------------------- + chunks_param1=ns + chunks_param2='"app.city"' + [[ 8.0 != \4\.\4 ]] + chunks_param1=uuid ++ run_mongos 'use app\n db.getCollectionInfos({ "name": "city" })[0].info.uuid' user:pass@cross-site-sharded-main-mongos.cross-site-sharded-5777 ++ local 'command=use app\n db.getCollectionInfos({ "name": "city" })[0].info.uuid' ++ local uri=user:pass@cross-site-sharded-main-mongos.cross-site-sharded-5777 ++ grep 'switched to db app' -A 1 ++ local driver=mongodb ++ local suffix=.svc.cluster.local ++ local mongo_flag= ++ local port=27017 ++ local mongo_bin=mongo ++ grep -v 'switched to db app' +++ echo .svc.cluster.local +++ awk -F: '{print $2}' ++ suffix_port= ++ [[ -z '' ]] ++ suffix=.svc.cluster.local:27017 +++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.HIi5XDVUvY ++++ mktemp +++ local LAST_ERR=/tmp/tmp.SvAdducD6i +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.HIi5XDVUvY +++ cat /tmp/tmp.SvAdducD6i +++ rm /tmp/tmp.HIi5XDVUvY /tmp/tmp.SvAdducD6i +++ return 0 ++ local client_container=psmdb-client-696897d69b-pjkvq ++ kubectl_bin exec psmdb-client-696897d69b-pjkvq -- bash -c 'printf '\''use app\n db.getCollectionInfos({ "name": "city" })[0].info.uuid\n'\'' | mongo mongodb://user:pass@cross-site-sharded-main-mongos.cross-site-sharded-5777.svc.cluster.local:27017/admin ' +++ mktemp ++ local LAST_OUT=/tmp/tmp.DZKCJJ2ohu +++ mktemp ++ local LAST_ERR=/tmp/tmp.90l80zQBqc ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl exec psmdb-client-696897d69b-pjkvq -- bash -c 'printf '\''use app\n db.getCollectionInfos({ "name": "city" })[0].info.uuid\n'\'' | mongo mongodb://user:pass@cross-site-sharded-main-mongos.cross-site-sharded-5777.svc.cluster.local:27017/admin ' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.DZKCJJ2ohu ++ cat /tmp/tmp.90l80zQBqc ++ rm /tmp/tmp.DZKCJJ2ohu /tmp/tmp.90l80zQBqc ++ return 0 + chunks_param2='UUID("0e78456a-aa18-4fdf-983c-6c22f54d59eb")' + shards=0 + for i in "rs0" "rs1" ++ run_mongos 'use config\n db.chunks.count({"uuid": 
UUID("0e78456a-aa18-4fdf-983c-6c22f54d59eb"), "shard": "rs0"})' clusterAdmin:clusterAdmin123456@cross-site-sharded-main-mongos.cross-site-sharded-5777 ++ local 'command=use config\n db.chunks.count({"uuid": UUID("0e78456a-aa18-4fdf-983c-6c22f54d59eb"), "shard": "rs0"})' ++ local uri=clusterAdmin:clusterAdmin123456@cross-site-sharded-main-mongos.cross-site-sharded-5777 ++ local driver=mongodb ++ local suffix=.svc.cluster.local ++ local mongo_flag= ++ local port=27017 ++ local mongo_bin=mongo ++ grep 'switched to db config' -A 1 ++ grep -v 'switched to db config' +++ echo .svc.cluster.local +++ awk -F: '{print $2}' ++ suffix_port= ++ [[ -z '' ]] ++ suffix=.svc.cluster.local:27017 +++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.sqPf3NPxdd ++++ mktemp +++ local LAST_ERR=/tmp/tmp.7CMw6LLOAL +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.sqPf3NPxdd +++ cat /tmp/tmp.7CMw6LLOAL +++ rm /tmp/tmp.sqPf3NPxdd /tmp/tmp.7CMw6LLOAL +++ return 0 ++ local client_container=psmdb-client-696897d69b-pjkvq ++ kubectl_bin exec psmdb-client-696897d69b-pjkvq -- bash -c 'printf '\''use config\n db.chunks.count({"uuid": UUID("0e78456a-aa18-4fdf-983c-6c22f54d59eb"), "shard": "rs0"})\n'\'' | mongo mongodb://clusterAdmin:clusterAdmin123456@cross-site-sharded-main-mongos.cross-site-sharded-5777.svc.cluster.local:27017/admin ' +++ mktemp ++ local LAST_OUT=/tmp/tmp.jz4WCy9pIN +++ mktemp ++ local LAST_ERR=/tmp/tmp.9aSCCnfonZ ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl exec psmdb-client-696897d69b-pjkvq -- bash -c 'printf '\''use config\n db.chunks.count({"uuid": UUID("0e78456a-aa18-4fdf-983c-6c22f54d59eb"), "shard": "rs0"})\n'\'' | mongo mongodb://clusterAdmin:clusterAdmin123456@cross-site-sharded-main-mongos.cross-site-sharded-5777.svc.cluster.local:27017/admin ' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.jz4WCy9pIN ++ cat /tmp/tmp.9aSCCnfonZ ++ rm /tmp/tmp.jz4WCy9pIN /tmp/tmp.9aSCCnfonZ ++ return 0 + out=1 + desc 'rs0 has 1 chunks' + set +o xtrace ----------------------------------------------------------------------------------- rs0 has 1 chunks ----------------------------------------------------------------------------------- + [[ 1 -ne 0 ]] + (( shards = shards + 1 )) + for i in "rs0" "rs1" ++ run_mongos 'use config\n db.chunks.count({"uuid": UUID("0e78456a-aa18-4fdf-983c-6c22f54d59eb"), "shard": "rs1"})' clusterAdmin:clusterAdmin123456@cross-site-sharded-main-mongos.cross-site-sharded-5777 ++ grep 'switched to db config' -A 1 ++ local 'command=use config\n db.chunks.count({"uuid": UUID("0e78456a-aa18-4fdf-983c-6c22f54d59eb"), "shard": "rs1"})' ++ local uri=clusterAdmin:clusterAdmin123456@cross-site-sharded-main-mongos.cross-site-sharded-5777 ++ local driver=mongodb ++ local suffix=.svc.cluster.local ++ local mongo_flag= ++ local port=27017 ++ local mongo_bin=mongo ++ grep -v 'switched to db config' +++ echo .svc.cluster.local +++ awk -F: '{print $2}' ++ suffix_port= ++ [[ -z '' ]] ++ suffix=.svc.cluster.local:27017 +++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.b8h1QSPZLw ++++ mktemp +++ local LAST_ERR=/tmp/tmp.eHCFXCJrVB +++ local exit_status=0 
+++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.b8h1QSPZLw +++ cat /tmp/tmp.eHCFXCJrVB +++ rm /tmp/tmp.b8h1QSPZLw /tmp/tmp.eHCFXCJrVB +++ return 0 ++ local client_container=psmdb-client-696897d69b-pjkvq ++ kubectl_bin exec psmdb-client-696897d69b-pjkvq -- bash -c 'printf '\''use config\n db.chunks.count({"uuid": UUID("0e78456a-aa18-4fdf-983c-6c22f54d59eb"), "shard": "rs1"})\n'\'' | mongo mongodb://clusterAdmin:clusterAdmin123456@cross-site-sharded-main-mongos.cross-site-sharded-5777.svc.cluster.local:27017/admin ' +++ mktemp ++ local LAST_OUT=/tmp/tmp.zuBOF5tmfn +++ mktemp ++ local LAST_ERR=/tmp/tmp.nPh8VUrtEu ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl exec psmdb-client-696897d69b-pjkvq -- bash -c 'printf '\''use config\n db.chunks.count({"uuid": UUID("0e78456a-aa18-4fdf-983c-6c22f54d59eb"), "shard": "rs1"})\n'\'' | mongo mongodb://clusterAdmin:clusterAdmin123456@cross-site-sharded-main-mongos.cross-site-sharded-5777.svc.cluster.local:27017/admin ' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.zuBOF5tmfn ++ cat /tmp/tmp.nPh8VUrtEu ++ rm /tmp/tmp.zuBOF5tmfn /tmp/tmp.nPh8VUrtEu ++ return 0 + out=3 + desc 'rs1 has 3 chunks' + set +o xtrace ----------------------------------------------------------------------------------- rs1 has 3 chunks ----------------------------------------------------------------------------------- + [[ 3 -ne 0 ]] + (( shards = shards + 1 )) + [[ 2 -lt 2 ]] + desc 'create replica cluster' + set +o xtrace ----------------------------------------------------------------------------------- create replica cluster ----------------------------------------------------------------------------------- + create_namespace cross-site-sharded-replica-519 0 + local namespace=cross-site-sharded-replica-519 + local skip_clean_namespace=0 + [[ 1 == 1 ]] + [[ -z 0 ]] + '[' -n '' ']' + desc 'cleaned up old namespaces cross-site-sharded-replica-519' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up old namespaces cross-site-sharded-replica-519 ----------------------------------------------------------------------------------- + kubectl_bin delete namespace cross-site-sharded-replica-519 --ignore-not-found ++ mktemp + local LAST_OUT=/tmp/tmp.xpCDMDe5Ju ++ mktemp + local LAST_ERR=/tmp/tmp.95a2AEVX4U + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete namespace cross-site-sharded-replica-519 --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.xpCDMDe5Ju + cat /tmp/tmp.95a2AEVX4U + rm /tmp/tmp.xpCDMDe5Ju /tmp/tmp.95a2AEVX4U + return 0 + kubectl_bin wait --for=delete namespace cross-site-sharded-replica-519 ++ mktemp + local LAST_OUT=/tmp/tmp.MjCnQ1wuSm ++ mktemp + local LAST_ERR=/tmp/tmp.x8TfuLKcIM + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete namespace cross-site-sharded-replica-519 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.MjCnQ1wuSm + cat /tmp/tmp.x8TfuLKcIM + rm /tmp/tmp.MjCnQ1wuSm /tmp/tmp.x8TfuLKcIM + return 0 + desc 'create namespace cross-site-sharded-replica-519' + set +o xtrace ----------------------------------------------------------------------------------- 
create namespace cross-site-sharded-replica-519
-----------------------------------------------------------------------------------
+ kubectl_bin create namespace cross-site-sharded-replica-519
++ mktemp
+ local LAST_OUT=/tmp/tmp.1oRn4VW9j3
++ mktemp
+ local LAST_ERR=/tmp/tmp.kWtRpZUwTc
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ kubectl create namespace cross-site-sharded-replica-519
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.1oRn4VW9j3
namespace/cross-site-sharded-replica-519 created
+ cat /tmp/tmp.kWtRpZUwTc
+ rm /tmp/tmp.1oRn4VW9j3 /tmp/tmp.kWtRpZUwTc
+ return 0
+ set_kube_ctx cross-site-sharded-replica-519
+ local namespace=cross-site-sharded-replica-519
++ kubectl_bin config current-context
+++ mktemp
++ local LAST_OUT=/tmp/tmp.3G9lORJWMA
+++ mktemp
++ local LAST_ERR=/tmp/tmp.u5tbevrUez
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in $(seq 0 2)
++ set +e
++ kubectl config current-context
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 -a -n 1 ']'
++ break
++ cat /tmp/tmp.3G9lORJWMA
++ cat /tmp/tmp.u5tbevrUez
++ rm /tmp/tmp.3G9lORJWMA /tmp/tmp.u5tbevrUez
++ return 0
+ kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-2109-03bacb5d-1-cluster3 --namespace=cross-site-sharded-replica-519
++ mktemp
+ local LAST_OUT=/tmp/tmp.XQ6B2SSQRn
++ mktemp
+ local LAST_ERR=/tmp/tmp.Ul47cbnrhP
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-2109-03bacb5d-1-cluster3 --namespace=cross-site-sharded-replica-519
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.XQ6B2SSQRn
Context "gke_cloud-dev-112233_us-central1-a_jen-psmdb-2109-03bacb5d-1-cluster3" modified.
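set_kube_ctx, traced just above, reads the current context and pins it to the target namespace, so every later kubectl call runs against the new namespace without an explicit -n flag. A minimal sketch of the pattern exactly as it appears in this trace:

# Sketch of the set_kube_ctx pattern from this trace: re-point the
# active context at a namespace instead of passing -n on every call.
set_kube_ctx() {
    local namespace="$1"
    local ctx
    ctx=$(kubectl config current-context)
    kubectl config set-context "$ctx" --namespace="$namespace"
}

set_kube_ctx cross-site-sharded-replica-519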
+ cat /tmp/tmp.Ul47cbnrhP + rm /tmp/tmp.XQ6B2SSQRn /tmp/tmp.Ul47cbnrhP + return 0 + deploy_operator + desc 'start PSMDB operator: perconalab/percona-server-mongodb-operator:PR-2109-03bacb5d' + set +o xtrace ----------------------------------------------------------------------------------- start PSMDB operator: perconalab/percona-server-mongodb-operator:PR-2109-03bacb5d ----------------------------------------------------------------------------------- + local cr_file + '[' -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2109/e2e-tests/cross-site-sharded/conf/crd.yaml ']' + cr_file=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2109/deploy/crd.yaml + kubectl_bin apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2109/deploy/crd.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.rjSqUqjtAN ++ mktemp + local LAST_ERR=/tmp/tmp.CHz0sguihV + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2109/deploy/crd.yaml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.rjSqUqjtAN customresourcedefinition.apiextensions.k8s.io/perconaservermongodbbackups.psmdb.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaservermongodbrestores.psmdb.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaservermongodbs.psmdb.percona.com serverside-applied + cat /tmp/tmp.CHz0sguihV + rm /tmp/tmp.rjSqUqjtAN /tmp/tmp.CHz0sguihV + return 0 + '[' -n '' ']' + apply_rbac rbac + local operator_namespace=psmdb-operator + local rbac=rbac + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2109/deploy/rbac.yaml + sed -e 's^namespace: .*^namespace: psmdb-operator^' + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.EsNbrNVofi ++ mktemp + local LAST_ERR=/tmp/tmp.FCKlSkHIA5 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.EsNbrNVofi role.rbac.authorization.k8s.io/percona-server-mongodb-operator created serviceaccount/percona-server-mongodb-operator created rolebinding.rbac.authorization.k8s.io/service-account-percona-server-mongodb-operator created + cat /tmp/tmp.FCKlSkHIA5 + rm /tmp/tmp.EsNbrNVofi /tmp/tmp.FCKlSkHIA5 + return 0 + yq eval ' (.spec.template.spec.containers[].image = "perconalab/percona-server-mongodb-operator:PR-2109-03bacb5d") | ((.. | select(.[] == "DISABLE_TELEMETRY")) |= .value="true") | ((.. 
| select(.[] == "LOG_LEVEL")) |= .value="DEBUG")' /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2109/deploy/operator.yaml + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.SKa5DnS93l ++ mktemp + local LAST_ERR=/tmp/tmp.ikwygh87Ux + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.SKa5DnS93l deployment.apps/percona-server-mongodb-operator created + cat /tmp/tmp.ikwygh87Ux + rm /tmp/tmp.SKa5DnS93l /tmp/tmp.ikwygh87Ux + return 0 + sleep 20 ++ get_operator_pod ++ kubectl_bin get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.IxWrSU6KEd +++ mktemp ++ local LAST_ERR=/tmp/tmp.QOF6FOxzGe ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.IxWrSU6KEd ++ cat /tmp/tmp.QOF6FOxzGe ++ rm /tmp/tmp.IxWrSU6KEd /tmp/tmp.QOF6FOxzGe ++ return 0 + wait_operator_pod percona-server-mongodb-operator-6c964988db-tgkpw + local pod=percona-server-mongodb-operator-6c964988db-tgkpw + set +o xtrace waiting for pod/percona-server-mongodb-operator-6c964988db-tgkpw to be ready.OK + echo 'Print operator info from log' Print operator info from log + grep 'Manager starting up' ++ get_operator_pod ++ kubectl_bin get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.7OyfkZqzMX +++ mktemp ++ local LAST_ERR=/tmp/tmp.s6Jwb54iW5 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.7OyfkZqzMX ++ cat /tmp/tmp.s6Jwb54iW5 ++ rm /tmp/tmp.7OyfkZqzMX /tmp/tmp.s6Jwb54iW5 ++ return 0 + kubectl_bin logs percona-server-mongodb-operator-6c964988db-tgkpw ++ mktemp + local LAST_OUT=/tmp/tmp.rr0XYLbtE3 ++ mktemp + local LAST_ERR=/tmp/tmp.axSsfnfIlp + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl logs percona-server-mongodb-operator-6c964988db-tgkpw + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.rr0XYLbtE3 + cat /tmp/tmp.axSsfnfIlp + rm /tmp/tmp.rr0XYLbtE3 /tmp/tmp.axSsfnfIlp + return 0 2025-11-10T01:51:52.695Z INFO setup Manager starting up {"gitCommit": "03bacb5d8b80cb442864368f4751bd8590422177", "gitBranch": "PR-2109-03bacb5d", "buildTime": "", "goVersion": "go1.25.4", "os": "linux", "arch": "amd64"} + desc 'start client' + set +o xtrace ----------------------------------------------------------------------------------- start client ----------------------------------------------------------------------------------- + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2109/e2e-tests/conf/client.yml ++ mktemp + local LAST_OUT=/tmp/tmp.ihZnISJm8h ++ mktemp + local LAST_ERR=/tmp/tmp.rSskPzD4Ge + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2109/e2e-tests/conf/client.yml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.ihZnISJm8h deployment.apps/psmdb-client created + cat /tmp/tmp.rSskPzD4Ge + rm 
/tmp/tmp.ihZnISJm8h /tmp/tmp.rSskPzD4Ge + return 0 + desc 'copy secrets from main to replica namespace and create all of them' + set +o xtrace ----------------------------------------------------------------------------------- copy secrets from main to replica namespace and create all of them ----------------------------------------------------------------------------------- + kubectl get secret cross-site-sharded-main-secrets -o yaml -n cross-site-sharded-5777 + yq eval ' del(.metadata) | (.metadata.name = "cross-site-sharded-replica-secrets")' - + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.RWk9PfWEin ++ mktemp + local LAST_ERR=/tmp/tmp.Dst3YD3tCl + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.RWk9PfWEin secret/cross-site-sharded-replica-secrets created + cat /tmp/tmp.Dst3YD3tCl + rm /tmp/tmp.RWk9PfWEin /tmp/tmp.Dst3YD3tCl + return 0 + kubectl_bin get secret cross-site-sharded-main-ssl-internal -o yaml -n cross-site-sharded-5777 + yq eval ' del(.metadata) | del(.status) | (.metadata.name = "cross-site-sharded-replica-ssl-internal")' - ++ mktemp + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.kS2vCvrtgh ++ mktemp + local LAST_OUT=/tmp/tmp.ZQpXvsCVJS + local LAST_ERR=/tmp/tmp.de5IYBFFSN + local exit_status=0 + local timeout=4 ++ mktemp ++ seq 0 2 + local LAST_ERR=/tmp/tmp.IeZWVrl5aG + local exit_status=0 + local timeout=4 + for i in $(seq 0 2) + set +e + kubectl get secret cross-site-sharded-main-ssl-internal -o yaml -n cross-site-sharded-5777 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.kS2vCvrtgh + cat /tmp/tmp.de5IYBFFSN + rm /tmp/tmp.kS2vCvrtgh /tmp/tmp.de5IYBFFSN + return 0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.ZQpXvsCVJS secret/cross-site-sharded-replica-ssl-internal created + cat /tmp/tmp.IeZWVrl5aG + rm /tmp/tmp.ZQpXvsCVJS /tmp/tmp.IeZWVrl5aG + return 0 + kubectl_bin get secret cross-site-sharded-main-ssl -o yaml -n cross-site-sharded-5777 + yq eval ' del(.metadata) | del(.status) | (.metadata.name = "cross-site-sharded-replica-ssl")' - + kubectl_bin apply -f - ++ mktemp ++ mktemp + local LAST_OUT=/tmp/tmp.I5N3LcA2ys + local LAST_OUT=/tmp/tmp.4EOOoY8dMG ++ mktemp ++ mktemp + local LAST_ERR=/tmp/tmp.C6fqH69KXZ + local exit_status=0 + local timeout=4 ++ seq 0 2 + local LAST_ERR=/tmp/tmp.PM11veZn37 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get secret cross-site-sharded-main-ssl -o yaml -n cross-site-sharded-5777 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.I5N3LcA2ys + cat /tmp/tmp.C6fqH69KXZ + rm /tmp/tmp.I5N3LcA2ys /tmp/tmp.C6fqH69KXZ + return 0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.4EOOoY8dMG secret/cross-site-sharded-replica-ssl created + cat /tmp/tmp.PM11veZn37 + rm /tmp/tmp.4EOOoY8dMG /tmp/tmp.PM11veZn37 + return 0 + sleep 30 + desc 'create replica PSMDB cluster ' + set +o xtrace ----------------------------------------------------------------------------------- create replica PSMDB cluster ----------------------------------------------------------------------------------- + apply_cluster /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2109/e2e-tests/cross-site-sharded/conf/cross-site-sharded-replica.yml + '[' -z '' ']' + 
cat_config /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2109/e2e-tests/cross-site-sharded/conf/cross-site-sharded-replica.yml + kubectl_bin apply -f - + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2109/e2e-tests/cross-site-sharded/conf/cross-site-sharded-replica.yml + yq eval '(.spec | select(.image == null)).image = "perconalab/percona-server-mongodb-operator:main-mongod8.0"' ++ mktemp + yq eval '(.spec | select(has("pmm"))).pmm.image = "percona/pmm-client:2.44.1-1"' + yq eval '(.spec | select(has("initImage"))).initImage = "perconalab/percona-server-mongodb-operator:PR-2109-03bacb5d"' + yq eval '(.spec | select(has("backup"))).backup.image = "perconalab/percona-server-mongodb-operator:main-backup"' + yq eval '.spec.upgradeOptions.apply="Never"' + local LAST_OUT=/tmp/tmp.NFf44AybBW ++ mktemp + local LAST_ERR=/tmp/tmp.3YgYc99lKr + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.NFf44AybBW perconaservermongodb.psmdb.percona.com/cross-site-sharded-replica created + cat /tmp/tmp.3YgYc99lKr + rm /tmp/tmp.NFf44AybBW /tmp/tmp.3YgYc99lKr + return 0 + wait_for_running cross-site-sharded-replica-rs0 3 false + local name=cross-site-sharded-replica-rs0 + let last_pod=2 + local check_cluster_readyness=false + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=cross-site-sharded-replica ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod cross-site-sharded-replica-rs0-0 + local pod=cross-site-sharded-replica-rs0-0 + set +o xtrace waiting for pod/cross-site-sharded-replica-rs0-0 to be ready...........OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod cross-site-sharded-replica-rs0-1 + local pod=cross-site-sharded-replica-rs0-1 + set +o xtrace waiting for pod/cross-site-sharded-replica-rs0-1 to be ready..............OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb cross-site-sharded-replica -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.c9Fr5lzdeT +++ mktemp ++ local LAST_ERR=/tmp/tmp.smaKqPvPvP ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb cross-site-sharded-replica -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.c9Fr5lzdeT ++ cat /tmp/tmp.smaKqPvPvP ++ rm /tmp/tmp.c9Fr5lzdeT /tmp/tmp.smaKqPvPvP ++ return 0 + [[ false == \t\r\u\e ]] + wait_pod cross-site-sharded-replica-rs0-2 + local pod=cross-site-sharded-replica-rs0-2 + set +o xtrace waiting for pod/cross-site-sharded-replica-rs0-2 to be ready.............OK ++ kubectl_bin get psmdb cross-site-sharded-replica -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.jhwi5eaSpu +++ mktemp ++ local LAST_ERR=/tmp/tmp.lFeuvGDenc ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb cross-site-sharded-replica -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.jhwi5eaSpu ++ cat /tmp/tmp.lFeuvGDenc ++ rm /tmp/tmp.jhwi5eaSpu /tmp/tmp.lFeuvGDenc ++ return 0 + [[ false == \t\r\u\e ]] ++ kubectl_bin get psmdb cross-site-sharded-replica -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' +++ mktemp ++ local 
LAST_OUT=/tmp/tmp.QBMDq758e0 +++ mktemp ++ local LAST_ERR=/tmp/tmp.tNVT7juP2S ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb cross-site-sharded-replica -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.QBMDq758e0 ++ cat /tmp/tmp.tNVT7juP2S ++ rm /tmp/tmp.QBMDq758e0 /tmp/tmp.tNVT7juP2S ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ false == \t\r\u\e ]] + wait_for_running cross-site-sharded-replica-rs1 3 false + local name=cross-site-sharded-replica-rs1 + let last_pod=2 + local check_cluster_readyness=false + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs1 + local cluster_name=cross-site-sharded-replica ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod cross-site-sharded-replica-rs1-0 + local pod=cross-site-sharded-replica-rs1-0 + set +o xtrace waiting for pod/cross-site-sharded-replica-rs1-0 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod cross-site-sharded-replica-rs1-1 + local pod=cross-site-sharded-replica-rs1-1 + set +o xtrace waiting for pod/cross-site-sharded-replica-rs1-1 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb cross-site-sharded-replica -o 'jsonpath={.spec.replsets[?(@.name=="rs1")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.6qQZ56bAkd +++ mktemp ++ local LAST_ERR=/tmp/tmp.mBZtUaGQEP ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb cross-site-sharded-replica -o 'jsonpath={.spec.replsets[?(@.name=="rs1")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.6qQZ56bAkd ++ cat /tmp/tmp.mBZtUaGQEP ++ rm /tmp/tmp.6qQZ56bAkd /tmp/tmp.mBZtUaGQEP ++ return 0 + [[ false == \t\r\u\e ]] + wait_pod cross-site-sharded-replica-rs1-2 + local pod=cross-site-sharded-replica-rs1-2 + set +o xtrace waiting for pod/cross-site-sharded-replica-rs1-2 to be ready.OK ++ kubectl_bin get psmdb cross-site-sharded-replica -o 'jsonpath={.spec.replsets[?(@.name=="rs1")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.EoyPaTeWtq +++ mktemp ++ local LAST_ERR=/tmp/tmp.mlHi6W6soJ ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb cross-site-sharded-replica -o 'jsonpath={.spec.replsets[?(@.name=="rs1")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.EoyPaTeWtq ++ cat /tmp/tmp.mlHi6W6soJ ++ rm /tmp/tmp.EoyPaTeWtq /tmp/tmp.mlHi6W6soJ ++ return 0 + [[ false == \t\r\u\e ]] ++ kubectl_bin get psmdb cross-site-sharded-replica -o 'jsonpath={.spec.replsets[?(@.name=="rs1")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.mx5mnMCmJa +++ mktemp ++ local LAST_ERR=/tmp/tmp.wD6NUSOTlk ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb cross-site-sharded-replica -o 'jsonpath={.spec.replsets[?(@.name=="rs1")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.mx5mnMCmJa ++ cat /tmp/tmp.wD6NUSOTlk ++ rm /tmp/tmp.mx5mnMCmJa /tmp/tmp.wD6NUSOTlk ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ false == \t\r\u\e ]] + wait_for_running cross-site-sharded-replica-cfg 3 false + local name=cross-site-sharded-replica-cfg + let last_pod=2 + local check_cluster_readyness=false + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=cfg + local 
cluster_name=cross-site-sharded-replica ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod cross-site-sharded-replica-cfg-0 + local pod=cross-site-sharded-replica-cfg-0 + set +o xtrace waiting for pod/cross-site-sharded-replica-cfg-0 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod cross-site-sharded-replica-cfg-1 + local pod=cross-site-sharded-replica-cfg-1 + set +o xtrace waiting for pod/cross-site-sharded-replica-cfg-1 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb cross-site-sharded-replica -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Fd5ztEAsLw +++ mktemp ++ local LAST_ERR=/tmp/tmp.jh1xPFrfob ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb cross-site-sharded-replica -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Fd5ztEAsLw ++ cat /tmp/tmp.jh1xPFrfob ++ rm /tmp/tmp.Fd5ztEAsLw /tmp/tmp.jh1xPFrfob ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod cross-site-sharded-replica-cfg-2 + local pod=cross-site-sharded-replica-cfg-2 + set +o xtrace waiting for pod/cross-site-sharded-replica-cfg-2 to be ready.OK ++ kubectl_bin get psmdb cross-site-sharded-replica -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.W6dh7l9M4Y +++ mktemp ++ local LAST_ERR=/tmp/tmp.eBcflaOGZ8 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb cross-site-sharded-replica -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.W6dh7l9M4Y ++ cat /tmp/tmp.eBcflaOGZ8 ++ rm /tmp/tmp.W6dh7l9M4Y /tmp/tmp.eBcflaOGZ8 ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb cross-site-sharded-replica -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Vcq27TQsKQ +++ mktemp ++ local LAST_ERR=/tmp/tmp.rFFYjRzGuL ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb cross-site-sharded-replica -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Vcq27TQsKQ ++ cat /tmp/tmp.rFFYjRzGuL ++ rm /tmp/tmp.Vcq27TQsKQ /tmp/tmp.rFFYjRzGuL ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ false == \t\r\u\e ]] ++ get_service_ip cross-site-sharded-replica-cfg-0 cfg ++ local service=cross-site-sharded-replica-cfg-0 ++ local server_type=cfg +++ kubectl_bin get psmdb/cross-site-sharded-replica -o 'jsonpath={.spec.replsets[].expose.enabled}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.SNKaWSH3jV ++++ mktemp +++ local LAST_ERR=/tmp/tmp.0ogCITW5qA +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get psmdb/cross-site-sharded-replica -o 'jsonpath={.spec.replsets[].expose.enabled}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.SNKaWSH3jV +++ cat /tmp/tmp.0ogCITW5qA +++ rm /tmp/tmp.SNKaWSH3jV /tmp/tmp.0ogCITW5qA +++ return 0 ++ '[' true '!=' true ']' ++ kubectl_bin get service/cross-site-sharded-replica-cfg-0 -o 'jsonpath={.spec.type}' ++ grep -q NotFound +++ kubectl_bin get service/cross-site-sharded-replica-cfg-0 -o 'jsonpath={.spec.type}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.h5ZpyaqW8T 
++++ mktemp +++ local LAST_ERR=/tmp/tmp.YH2y4uAH8Y +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get service/cross-site-sharded-replica-cfg-0 -o 'jsonpath={.spec.type}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.h5ZpyaqW8T +++ cat /tmp/tmp.YH2y4uAH8Y +++ rm /tmp/tmp.h5ZpyaqW8T /tmp/tmp.YH2y4uAH8Y +++ return 0 ++ service_type=ClusterIP ++ '[' ClusterIP = ClusterIP ']' ++ kubectl_bin get service/cross-site-sharded-replica-cfg-0 -o 'jsonpath={.spec.clusterIP}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.G0UtJywBeE +++ mktemp ++ local LAST_ERR=/tmp/tmp.MBkqgHToR5 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get service/cross-site-sharded-replica-cfg-0 -o 'jsonpath={.spec.clusterIP}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.G0UtJywBeE ++ cat /tmp/tmp.MBkqgHToR5 ++ rm /tmp/tmp.G0UtJywBeE /tmp/tmp.MBkqgHToR5 ++ return 0 ++ return + replica_cfg_0_endpoint=34.118.238.30 ++ get_service_ip cross-site-sharded-replica-cfg-1 cfg ++ local service=cross-site-sharded-replica-cfg-1 ++ local server_type=cfg +++ kubectl_bin get psmdb/cross-site-sharded-replica -o 'jsonpath={.spec.replsets[].expose.enabled}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.s5eAbjQ6B0 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.AGUo1xEYNs +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get psmdb/cross-site-sharded-replica -o 'jsonpath={.spec.replsets[].expose.enabled}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.s5eAbjQ6B0 +++ cat /tmp/tmp.AGUo1xEYNs +++ rm /tmp/tmp.s5eAbjQ6B0 /tmp/tmp.AGUo1xEYNs +++ return 0 ++ '[' true '!=' true ']' ++ kubectl_bin get service/cross-site-sharded-replica-cfg-1 -o 'jsonpath={.spec.type}' ++ grep -q NotFound +++ kubectl_bin get service/cross-site-sharded-replica-cfg-1 -o 'jsonpath={.spec.type}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.m6o8rEaB8p ++++ mktemp +++ local LAST_ERR=/tmp/tmp.d872OCBWC5 +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get service/cross-site-sharded-replica-cfg-1 -o 'jsonpath={.spec.type}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.m6o8rEaB8p +++ cat /tmp/tmp.d872OCBWC5 +++ rm /tmp/tmp.m6o8rEaB8p /tmp/tmp.d872OCBWC5 +++ return 0 ++ service_type=ClusterIP ++ '[' ClusterIP = ClusterIP ']' ++ kubectl_bin get service/cross-site-sharded-replica-cfg-1 -o 'jsonpath={.spec.clusterIP}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.4aLWRbls1z +++ mktemp ++ local LAST_ERR=/tmp/tmp.FZz7J5uOwx ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get service/cross-site-sharded-replica-cfg-1 -o 'jsonpath={.spec.clusterIP}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.4aLWRbls1z ++ cat /tmp/tmp.FZz7J5uOwx ++ rm /tmp/tmp.4aLWRbls1z /tmp/tmp.FZz7J5uOwx ++ return 0 ++ return + replica_cfg_1_endpoint=34.118.227.190 ++ get_service_ip cross-site-sharded-replica-cfg-2 cfg ++ local service=cross-site-sharded-replica-cfg-2 ++ local server_type=cfg +++ kubectl_bin get psmdb/cross-site-sharded-replica -o 'jsonpath={.spec.replsets[].expose.enabled}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.fCMNAmSLOf ++++ mktemp +++ local LAST_ERR=/tmp/tmp.sx2RSb0E0c +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get 
psmdb/cross-site-sharded-replica -o 'jsonpath={.spec.replsets[].expose.enabled}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.fCMNAmSLOf +++ cat /tmp/tmp.sx2RSb0E0c +++ rm /tmp/tmp.fCMNAmSLOf /tmp/tmp.sx2RSb0E0c +++ return 0 ++ '[' true '!=' true ']' ++ kubectl_bin get service/cross-site-sharded-replica-cfg-2 -o 'jsonpath={.spec.type}' ++ grep -q NotFound +++ kubectl_bin get service/cross-site-sharded-replica-cfg-2 -o 'jsonpath={.spec.type}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.qK8oPrP3XS ++++ mktemp +++ local LAST_ERR=/tmp/tmp.44kNTpqyoa +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get service/cross-site-sharded-replica-cfg-2 -o 'jsonpath={.spec.type}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.qK8oPrP3XS +++ cat /tmp/tmp.44kNTpqyoa +++ rm /tmp/tmp.qK8oPrP3XS /tmp/tmp.44kNTpqyoa +++ return 0 ++ service_type=ClusterIP ++ '[' ClusterIP = ClusterIP ']' ++ kubectl_bin get service/cross-site-sharded-replica-cfg-2 -o 'jsonpath={.spec.clusterIP}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.vdU0olEh72 +++ mktemp ++ local LAST_ERR=/tmp/tmp.ws6V6GI0qL ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get service/cross-site-sharded-replica-cfg-2 -o 'jsonpath={.spec.clusterIP}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.vdU0olEh72 ++ cat /tmp/tmp.ws6V6GI0qL ++ rm /tmp/tmp.vdU0olEh72 /tmp/tmp.ws6V6GI0qL ++ return 0 ++ return + replica_cfg_2_endpoint=34.118.239.159 ++ get_service_ip cross-site-sharded-replica-rs0-0 ++ local service=cross-site-sharded-replica-rs0-0 ++ local server_type=rs0 +++ kubectl_bin get psmdb/cross-site-sharded-replica -o 'jsonpath={.spec.replsets[].expose.enabled}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.DvXaQzbDbI ++++ mktemp +++ local LAST_ERR=/tmp/tmp.Dg7DY2cWSo +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get psmdb/cross-site-sharded-replica -o 'jsonpath={.spec.replsets[].expose.enabled}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.DvXaQzbDbI +++ cat /tmp/tmp.Dg7DY2cWSo +++ rm /tmp/tmp.DvXaQzbDbI /tmp/tmp.Dg7DY2cWSo +++ return 0 ++ '[' true '!=' true ']' ++ kubectl_bin get service/cross-site-sharded-replica-rs0-0 -o 'jsonpath={.spec.type}' ++ grep -q NotFound +++ kubectl_bin get service/cross-site-sharded-replica-rs0-0 -o 'jsonpath={.spec.type}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.7e503dTucD ++++ mktemp +++ local LAST_ERR=/tmp/tmp.12Gb5DSQ00 +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get service/cross-site-sharded-replica-rs0-0 -o 'jsonpath={.spec.type}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.7e503dTucD +++ cat /tmp/tmp.12Gb5DSQ00 +++ rm /tmp/tmp.7e503dTucD /tmp/tmp.12Gb5DSQ00 +++ return 0 ++ service_type=ClusterIP ++ '[' ClusterIP = ClusterIP ']' ++ kubectl_bin get service/cross-site-sharded-replica-rs0-0 -o 'jsonpath={.spec.clusterIP}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.XdRdiWFv0g +++ mktemp ++ local LAST_ERR=/tmp/tmp.1ROhIuYUfz ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get service/cross-site-sharded-replica-rs0-0 -o 'jsonpath={.spec.clusterIP}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.XdRdiWFv0g ++ cat /tmp/tmp.1ROhIuYUfz ++ rm /tmp/tmp.XdRdiWFv0g 
/tmp/tmp.1ROhIuYUfz ++ return 0 ++ return + replica_rs0_0_endpoint=34.118.237.130 ++ get_service_ip cross-site-sharded-replica-rs0-1 ++ local service=cross-site-sharded-replica-rs0-1 ++ local server_type=rs0 +++ kubectl_bin get psmdb/cross-site-sharded-replica -o 'jsonpath={.spec.replsets[].expose.enabled}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.nVGy6LprYn ++++ mktemp +++ local LAST_ERR=/tmp/tmp.NZYHvOD4eO +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get psmdb/cross-site-sharded-replica -o 'jsonpath={.spec.replsets[].expose.enabled}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.nVGy6LprYn +++ cat /tmp/tmp.NZYHvOD4eO +++ rm /tmp/tmp.nVGy6LprYn /tmp/tmp.NZYHvOD4eO +++ return 0 ++ '[' true '!=' true ']' ++ kubectl_bin get service/cross-site-sharded-replica-rs0-1 -o 'jsonpath={.spec.type}' ++ grep -q NotFound +++ kubectl_bin get service/cross-site-sharded-replica-rs0-1 -o 'jsonpath={.spec.type}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.86gJHyOE0O ++++ mktemp +++ local LAST_ERR=/tmp/tmp.vR6CvzprJl +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get service/cross-site-sharded-replica-rs0-1 -o 'jsonpath={.spec.type}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.86gJHyOE0O +++ cat /tmp/tmp.vR6CvzprJl +++ rm /tmp/tmp.86gJHyOE0O /tmp/tmp.vR6CvzprJl +++ return 0 ++ service_type=ClusterIP ++ '[' ClusterIP = ClusterIP ']' ++ kubectl_bin get service/cross-site-sharded-replica-rs0-1 -o 'jsonpath={.spec.clusterIP}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Jrp9xdvUwc +++ mktemp ++ local LAST_ERR=/tmp/tmp.Y91BzTpF2F ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get service/cross-site-sharded-replica-rs0-1 -o 'jsonpath={.spec.clusterIP}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Jrp9xdvUwc ++ cat /tmp/tmp.Y91BzTpF2F ++ rm /tmp/tmp.Jrp9xdvUwc /tmp/tmp.Y91BzTpF2F ++ return 0 ++ return + replica_rs0_1_endpoint=34.118.231.62 ++ get_service_ip cross-site-sharded-replica-rs0-2 ++ local service=cross-site-sharded-replica-rs0-2 ++ local server_type=rs0 +++ kubectl_bin get psmdb/cross-site-sharded-replica -o 'jsonpath={.spec.replsets[].expose.enabled}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.6hMMs6X5zb ++++ mktemp +++ local LAST_ERR=/tmp/tmp.m9evgjXKe6 +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get psmdb/cross-site-sharded-replica -o 'jsonpath={.spec.replsets[].expose.enabled}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.6hMMs6X5zb +++ cat /tmp/tmp.m9evgjXKe6 +++ rm /tmp/tmp.6hMMs6X5zb /tmp/tmp.m9evgjXKe6 +++ return 0 ++ '[' true '!=' true ']' ++ kubectl_bin get service/cross-site-sharded-replica-rs0-2 -o 'jsonpath={.spec.type}' ++ grep -q NotFound +++ kubectl_bin get service/cross-site-sharded-replica-rs0-2 -o 'jsonpath={.spec.type}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.m280Lko2xW ++++ mktemp +++ local LAST_ERR=/tmp/tmp.OZUkb1prp8 +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get service/cross-site-sharded-replica-rs0-2 -o 'jsonpath={.spec.type}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.m280Lko2xW +++ cat /tmp/tmp.OZUkb1prp8 +++ rm /tmp/tmp.m280Lko2xW /tmp/tmp.OZUkb1prp8 +++ return 0 ++ service_type=ClusterIP ++ '[' ClusterIP = 
ClusterIP ']' ++ kubectl_bin get service/cross-site-sharded-replica-rs0-2 -o 'jsonpath={.spec.clusterIP}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.swScEjU9Rc +++ mktemp ++ local LAST_ERR=/tmp/tmp.wt2XKmttEk ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get service/cross-site-sharded-replica-rs0-2 -o 'jsonpath={.spec.clusterIP}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.swScEjU9Rc ++ cat /tmp/tmp.wt2XKmttEk ++ rm /tmp/tmp.swScEjU9Rc /tmp/tmp.wt2XKmttEk ++ return 0 ++ return + replica_rs0_2_endpoint=34.118.231.122 ++ get_service_ip cross-site-sharded-replica-rs1-0 rs1 ++ local service=cross-site-sharded-replica-rs1-0 ++ local server_type=rs1 +++ kubectl_bin get psmdb/cross-site-sharded-replica -o 'jsonpath={.spec.replsets[].expose.enabled}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.dAEDIGlJeM ++++ mktemp +++ local LAST_ERR=/tmp/tmp.EXEQL6sqDa +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get psmdb/cross-site-sharded-replica -o 'jsonpath={.spec.replsets[].expose.enabled}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.dAEDIGlJeM +++ cat /tmp/tmp.EXEQL6sqDa +++ rm /tmp/tmp.dAEDIGlJeM /tmp/tmp.EXEQL6sqDa +++ return 0 ++ '[' true '!=' true ']' ++ grep -q NotFound ++ kubectl_bin get service/cross-site-sharded-replica-rs1-0 -o 'jsonpath={.spec.type}' +++ kubectl_bin get service/cross-site-sharded-replica-rs1-0 -o 'jsonpath={.spec.type}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.2nkBiKB6Dh ++++ mktemp +++ local LAST_ERR=/tmp/tmp.lmNeGpMhQT +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get service/cross-site-sharded-replica-rs1-0 -o 'jsonpath={.spec.type}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.2nkBiKB6Dh +++ cat /tmp/tmp.lmNeGpMhQT +++ rm /tmp/tmp.2nkBiKB6Dh /tmp/tmp.lmNeGpMhQT +++ return 0 ++ service_type=ClusterIP ++ '[' ClusterIP = ClusterIP ']' ++ kubectl_bin get service/cross-site-sharded-replica-rs1-0 -o 'jsonpath={.spec.clusterIP}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Ee0FAzduZS +++ mktemp ++ local LAST_ERR=/tmp/tmp.3vuubeKK2J ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get service/cross-site-sharded-replica-rs1-0 -o 'jsonpath={.spec.clusterIP}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Ee0FAzduZS ++ cat /tmp/tmp.3vuubeKK2J ++ rm /tmp/tmp.Ee0FAzduZS /tmp/tmp.3vuubeKK2J ++ return 0 ++ return + replica_rs1_0_endpoint=34.118.237.241 ++ get_service_ip cross-site-sharded-replica-rs1-1 rs1 ++ local service=cross-site-sharded-replica-rs1-1 ++ local server_type=rs1 +++ kubectl_bin get psmdb/cross-site-sharded-replica -o 'jsonpath={.spec.replsets[].expose.enabled}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.xOIVC7Gmfp ++++ mktemp +++ local LAST_ERR=/tmp/tmp.w9lByWpW5z +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get psmdb/cross-site-sharded-replica -o 'jsonpath={.spec.replsets[].expose.enabled}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.xOIVC7Gmfp +++ cat /tmp/tmp.w9lByWpW5z +++ rm /tmp/tmp.xOIVC7Gmfp /tmp/tmp.w9lByWpW5z +++ return 0 ++ '[' true '!=' true ']' ++ kubectl_bin get service/cross-site-sharded-replica-rs1-1 -o 'jsonpath={.spec.type}' ++ grep -q NotFound +++ kubectl_bin get service/cross-site-sharded-replica-rs1-1 -o 
'jsonpath={.spec.type}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.lqMVGyZISG ++++ mktemp +++ local LAST_ERR=/tmp/tmp.UBYeCPPBEM +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get service/cross-site-sharded-replica-rs1-1 -o 'jsonpath={.spec.type}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.lqMVGyZISG +++ cat /tmp/tmp.UBYeCPPBEM +++ rm /tmp/tmp.lqMVGyZISG /tmp/tmp.UBYeCPPBEM +++ return 0 ++ service_type=ClusterIP ++ '[' ClusterIP = ClusterIP ']' ++ kubectl_bin get service/cross-site-sharded-replica-rs1-1 -o 'jsonpath={.spec.clusterIP}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.pzm6paUUQR +++ mktemp ++ local LAST_ERR=/tmp/tmp.42wJy5rBb5 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get service/cross-site-sharded-replica-rs1-1 -o 'jsonpath={.spec.clusterIP}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.pzm6paUUQR ++ cat /tmp/tmp.42wJy5rBb5 ++ rm /tmp/tmp.pzm6paUUQR /tmp/tmp.42wJy5rBb5 ++ return 0 ++ return + replica_rs1_1_endpoint=34.118.232.153 ++ get_service_ip cross-site-sharded-replica-rs1-2 rs1 ++ local service=cross-site-sharded-replica-rs1-2 ++ local server_type=rs1 +++ kubectl_bin get psmdb/cross-site-sharded-replica -o 'jsonpath={.spec.replsets[].expose.enabled}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.b9TkGzObzg ++++ mktemp +++ local LAST_ERR=/tmp/tmp.sI4Pq6IaDi +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get psmdb/cross-site-sharded-replica -o 'jsonpath={.spec.replsets[].expose.enabled}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.b9TkGzObzg +++ cat /tmp/tmp.sI4Pq6IaDi +++ rm /tmp/tmp.b9TkGzObzg /tmp/tmp.sI4Pq6IaDi +++ return 0 ++ '[' true '!=' true ']' ++ kubectl_bin get service/cross-site-sharded-replica-rs1-2 -o 'jsonpath={.spec.type}' ++ grep -q NotFound +++ kubectl_bin get service/cross-site-sharded-replica-rs1-2 -o 'jsonpath={.spec.type}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.ce8SpTj7N8 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.8cPdabPDQZ +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get service/cross-site-sharded-replica-rs1-2 -o 'jsonpath={.spec.type}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.ce8SpTj7N8 +++ cat /tmp/tmp.8cPdabPDQZ +++ rm /tmp/tmp.ce8SpTj7N8 /tmp/tmp.8cPdabPDQZ +++ return 0 ++ service_type=ClusterIP ++ '[' ClusterIP = ClusterIP ']' ++ kubectl_bin get service/cross-site-sharded-replica-rs1-2 -o 'jsonpath={.spec.clusterIP}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.RVdnK5Bh7G +++ mktemp ++ local LAST_ERR=/tmp/tmp.v5FBwwXRa7 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get service/cross-site-sharded-replica-rs1-2 -o 'jsonpath={.spec.clusterIP}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.RVdnK5Bh7G ++ cat /tmp/tmp.v5FBwwXRa7 ++ rm /tmp/tmp.RVdnK5Bh7G /tmp/tmp.v5FBwwXRa7 ++ return 0 ++ return + replica_rs1_2_endpoint=34.118.235.155 ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.0YCPif8til +++ mktemp ++ local LAST_ERR=/tmp/tmp.1ImU1p2FMo ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.0YCPif8til ++ cat /tmp/tmp.1ImU1p2FMo ++ 
rm /tmp/tmp.0YCPif8til /tmp/tmp.1ImU1p2FMo ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-2109-03bacb5d-1-cluster3 --namespace=cross-site-sharded-5777 ++ mktemp + local LAST_OUT=/tmp/tmp.aOfnoyD2EH ++ mktemp + local LAST_ERR=/tmp/tmp.YX4PsHrrKp + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-2109-03bacb5d-1-cluster3 --namespace=cross-site-sharded-5777 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.aOfnoyD2EH Context "gke_cloud-dev-112233_us-central1-a_jen-psmdb-2109-03bacb5d-1-cluster3" modified. + cat /tmp/tmp.YX4PsHrrKp + rm /tmp/tmp.aOfnoyD2EH /tmp/tmp.YX4PsHrrKp + return 0 + kubectl_bin patch psmdb cross-site-sharded-main --type=merge --patch '{ "spec": {"replsets":[ {"affinity":{"antiAffinityTopologyKey": "none"},"arbiter":{"affinity":{"antiAffinityTopologyKey": "none"},"enabled":false,"size":1},"expose":{"enabled":true,"type":"ClusterIp"},"externalNodes":[{"host":"34.118.237.130","priority":0,"votes":0},{"host":"34.118.231.62","port":27017,"priority":1,"votes":1},{"host":"34.118.231.122", "port":27017,"priority":1,"votes":1}],"name":"rs0","nonvoting":{"affinity":{"antiAffinityTopologyKey":"none"},"enabled":false,"podDisruptionBudget":{"maxUnavailable":1},"resources":{"limits":{"cpu":"300m","memory":"0.5G"},"requests":{"cpu":"300m","memory":"0.5G"}},"size":3,"volumeSpec":{"persistentVolumeClaim":{"resources":{"requests":{"storage":"1Gi"}}}}},"podDisruptionBudget":{"maxUnavailable":1},"resources":{"limits":{"cpu":"300m","memory":"0.5G"},"requests":{"cpu":"300m","memory":"0.5G"}},"size":3,"volumeSpec":{"persistentVolumeClaim":{"resources":{"requests":{"storage":"3Gi"}}}}}, {"affinity":{"antiAffinityTopologyKey": "none"},"arbiter":{"affinity":{"antiAffinityTopologyKey": "none"},"enabled":false,"size":1},"expose":{"enabled":true,"type":"ClusterIp"},"externalNodes":[{"host":"34.118.237.241","priority":0,"votes":0},{"host":"34.118.232.153","port":27017,"priority":1,"votes":1},{"host":"34.118.235.155", "port":27017,"priority":1,"votes":1}],"name":"rs1","nonvoting":{"affinity":{"antiAffinityTopologyKey":"none"},"enabled":false,"podDisruptionBudget":{"maxUnavailable":1},"resources":{"limits":{"cpu":"300m","memory":"0.5G"},"requests":{"cpu":"300m","memory":"0.5G"}},"size":3,"volumeSpec":{"persistentVolumeClaim":{"resources":{"requests":{"storage":"1Gi"}}}}},"podDisruptionBudget":{"maxUnavailable":1},"resources":{"limits":{"cpu":"300m","memory":"0.5G"},"requests":{"cpu":"300m","memory":"0.5G"}},"size":3,"volumeSpec":{"persistentVolumeClaim":{"resources":{"requests":{"storage":"3Gi"}}}}} ], "sharding":{"configsvrReplSet":{ "externalNodes": [{"host":"34.118.238.30","priority":1,"votes":1 },{"host":"34.118.227.190", "priority":1,"votes":1},{"host":"34.118.239.159","priority":0,"votes":0}]}} } }' ++ mktemp + local LAST_OUT=/tmp/tmp.P2vqkVG5p1 ++ mktemp + local LAST_ERR=/tmp/tmp.1flzAVsSqx + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl patch psmdb cross-site-sharded-main --type=merge --patch '{ "spec": {"replsets":[ {"affinity":{"antiAffinityTopologyKey": "none"},"arbiter":{"affinity":{"antiAffinityTopologyKey": "none"},"enabled":false,"size":1},"expose":{"enabled":true,"type":"ClusterIp"},"externalNodes":[{"host":"34.118.237.130","priority":0,"votes":0},{"host":"34.118.231.62","port":27017,"priority":1,"votes":1},{"host":"34.118.231.122", 
"port":27017,"priority":1,"votes":1}],"name":"rs0","nonvoting":{"affinity":{"antiAffinityTopologyKey":"none"},"enabled":false,"podDisruptionBudget":{"maxUnavailable":1},"resources":{"limits":{"cpu":"300m","memory":"0.5G"},"requests":{"cpu":"300m","memory":"0.5G"}},"size":3,"volumeSpec":{"persistentVolumeClaim":{"resources":{"requests":{"storage":"1Gi"}}}}},"podDisruptionBudget":{"maxUnavailable":1},"resources":{"limits":{"cpu":"300m","memory":"0.5G"},"requests":{"cpu":"300m","memory":"0.5G"}},"size":3,"volumeSpec":{"persistentVolumeClaim":{"resources":{"requests":{"storage":"3Gi"}}}}}, {"affinity":{"antiAffinityTopologyKey": "none"},"arbiter":{"affinity":{"antiAffinityTopologyKey": "none"},"enabled":false,"size":1},"expose":{"enabled":true,"type":"ClusterIp"},"externalNodes":[{"host":"34.118.237.241","priority":0,"votes":0},{"host":"34.118.232.153","port":27017,"priority":1,"votes":1},{"host":"34.118.235.155", "port":27017,"priority":1,"votes":1}],"name":"rs1","nonvoting":{"affinity":{"antiAffinityTopologyKey":"none"},"enabled":false,"podDisruptionBudget":{"maxUnavailable":1},"resources":{"limits":{"cpu":"300m","memory":"0.5G"},"requests":{"cpu":"300m","memory":"0.5G"}},"size":3,"volumeSpec":{"persistentVolumeClaim":{"resources":{"requests":{"storage":"1Gi"}}}}},"podDisruptionBudget":{"maxUnavailable":1},"resources":{"limits":{"cpu":"300m","memory":"0.5G"},"requests":{"cpu":"300m","memory":"0.5G"}},"size":3,"volumeSpec":{"persistentVolumeClaim":{"resources":{"requests":{"storage":"3Gi"}}}}} ], "sharding":{"configsvrReplSet":{ "externalNodes": [{"host":"34.118.238.30","priority":1,"votes":1 },{"host":"34.118.227.190", "priority":1,"votes":1},{"host":"34.118.239.159","priority":0,"votes":0}]}} } }' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.P2vqkVG5p1 perconaservermongodb.psmdb.percona.com/cross-site-sharded-main patched + cat /tmp/tmp.1flzAVsSqx + rm /tmp/tmp.P2vqkVG5p1 /tmp/tmp.1flzAVsSqx + return 0 + wait_for_members 34.118.238.30 cfg 6 + local endpoint=34.118.238.30 + local rsName=cfg + local target_count=6 + local nodes_count=0 + [[ 0 == 6 ]] ++ run_mongos 'rs.conf().members.length' clusterAdmin:clusterAdmin123456@34.118.238.30 mongodb :27017 ++ local 'command=rs.conf().members.length' ++ local uri=clusterAdmin:clusterAdmin123456@34.118.238.30 ++ egrep -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|bye' ++ local driver=mongodb ++ local suffix=:27017 ++ local mongo_flag= ++ local port=27017 ++ local mongo_bin=mongo ++ /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' +++ echo :27017 +++ awk -F: '{print $2}' egrep: warning: egrep is obsolescent; using grep -E ++ suffix_port=27017 ++ [[ -z 27017 ]] +++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.AQ4JmsTjpE ++++ mktemp +++ local LAST_ERR=/tmp/tmp.gw1C012IkP +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.AQ4JmsTjpE +++ cat /tmp/tmp.gw1C012IkP +++ rm /tmp/tmp.AQ4JmsTjpE /tmp/tmp.gw1C012IkP +++ return 0 ++ local client_container=psmdb-client-696897d69b-pjkvq ++ kubectl_bin exec psmdb-client-696897d69b-pjkvq -- bash -c 'printf 
'\''rs.conf().members.length\n'\'' | mongo mongodb://clusterAdmin:clusterAdmin123456@34.118.238.30:27017/admin ' +++ mktemp ++ local LAST_OUT=/tmp/tmp.99wVeRv3GY +++ mktemp ++ local LAST_ERR=/tmp/tmp.HqnZGTdIb7 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl exec psmdb-client-696897d69b-pjkvq -- bash -c 'printf '\''rs.conf().members.length\n'\'' | mongo mongodb://clusterAdmin:clusterAdmin123456@34.118.238.30:27017/admin ' ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.99wVeRv3GY ++ cat /tmp/tmp.HqnZGTdIb7 command terminated with exit code 1 ++ sleep 0 ++ for i in $(seq 0 2) ++ set +e ++ kubectl exec psmdb-client-696897d69b-pjkvq -- bash -c 'printf '\''rs.conf().members.length\n'\'' | mongo mongodb://clusterAdmin:clusterAdmin123456@34.118.238.30:27017/admin ' ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.99wVeRv3GY ++ cat /tmp/tmp.HqnZGTdIb7 command terminated with exit code 1 ++ sleep 4 ++ for i in $(seq 0 2) ++ set +e ++ kubectl exec psmdb-client-696897d69b-pjkvq -- bash -c 'printf '\''rs.conf().members.length\n'\'' | mongo mongodb://clusterAdmin:clusterAdmin123456@34.118.238.30:27017/admin ' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.99wVeRv3GY ++ cat /tmp/tmp.HqnZGTdIb7 ++ rm /tmp/tmp.99wVeRv3GY /tmp/tmp.HqnZGTdIb7 ++ return 0 + nodes_count='Error: Authentication failed. : connect@src/mongo/shell/mongo.js:374:17 @(connect):2:6 exception: connect failed exiting with code 1 Error: Authentication failed. : connect@src/mongo/shell/mongo.js:374:17 @(connect):2:6 exception: connect failed exiting with code 1 4' + echo -n 'waiting for all members to be configured in cfg' waiting for all members to be configured in cfg+ let retry+=1 + '[' 1 -ge 15 ']' + echo . . + sleep 10 + [[ Error: Authentication failed. : connect@src/mongo/shell/mongo.js:374:17 @(connect):2:6 exception: connect failed exiting with code 1 Error: Authentication failed. 
: connect@src/mongo/shell/mongo.js:374:17 @(connect):2:6 exception: connect failed exiting with code 1 4 == 6 ]] ++ run_mongos 'rs.conf().members.length' clusterAdmin:clusterAdmin123456@34.118.238.30 mongodb :27017 ++ local 'command=rs.conf().members.length' ++ local uri=clusterAdmin:clusterAdmin123456@34.118.238.30 ++ local driver=mongodb ++ local suffix=:27017 ++ local mongo_flag= ++ local port=27017 ++ local mongo_bin=mongo ++ egrep -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|bye' ++ /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' +++ echo :27017 +++ awk -F: '{print $2}' egrep: warning: egrep is obsolescent; using grep -E ++ suffix_port=27017 ++ [[ -z 27017 ]] +++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.Hs7lTX85Fd ++++ mktemp +++ local LAST_ERR=/tmp/tmp.fC1VdfAyvl +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.Hs7lTX85Fd +++ cat /tmp/tmp.fC1VdfAyvl +++ rm /tmp/tmp.Hs7lTX85Fd /tmp/tmp.fC1VdfAyvl +++ return 0 ++ local client_container=psmdb-client-696897d69b-pjkvq ++ kubectl_bin exec psmdb-client-696897d69b-pjkvq -- bash -c 'printf '\''rs.conf().members.length\n'\'' | mongo mongodb://clusterAdmin:clusterAdmin123456@34.118.238.30:27017/admin ' +++ mktemp ++ local LAST_OUT=/tmp/tmp.hZxsXG5qAS +++ mktemp ++ local LAST_ERR=/tmp/tmp.0IeDDekN23 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl exec psmdb-client-696897d69b-pjkvq -- bash -c 'printf '\''rs.conf().members.length\n'\'' | mongo mongodb://clusterAdmin:clusterAdmin123456@34.118.238.30:27017/admin ' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.hZxsXG5qAS ++ cat /tmp/tmp.0IeDDekN23 ++ rm /tmp/tmp.hZxsXG5qAS /tmp/tmp.0IeDDekN23 ++ return 0 + nodes_count=5 + echo -n 'waiting for all members to be configured in cfg' waiting for all members to be configured in cfg+ let retry+=1 + '[' 2 -ge 15 ']' + echo . . 
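Two things are worth noting in the first wait_for_members iteration above. First, the target of 6 comes from the patch a few lines earlier: each replset on the main cluster keeps its 3 local members and gains 3 externalNodes pointing at the replica cluster's ClusterIP endpoints (for cfg: 34.118.238.30, 34.118.227.190, and 34.118.239.159, the last with priority 0 and votes 0). Second, the first two exec attempts fail with 'Authentication failed', likely because the newly wired-in node has not yet caught up; since the harness cats the accumulated capture files after every attempt, nodes_count ends up holding the error text plus the eventual '4'. That noisy value simply fails the string comparison against 6, so the loop retries harmlessly. The probe itself boils down to this (a sketch, assuming the psmdb-client pod label and the clusterAdmin credentials shown in the log; the grep exclusion list is abbreviated from the harness's longer one):

  client=$(kubectl get pods --selector=name=psmdb-client \
    -o 'jsonpath={.items[].metadata.name}')
  kubectl exec "$client" -- bash -c \
    "printf 'rs.conf().members.length\n' | mongo mongodb://clusterAdmin:clusterAdmin123456@34.118.238.30:27017/admin" \
    | grep -Ev 'I NETWORK|W NETWORK|connecting to:|Implicit session:|bye'  # keep the numeric answer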
+ sleep 10 + [[ 5 == 6 ]] ++ run_mongos 'rs.conf().members.length' clusterAdmin:clusterAdmin123456@34.118.238.30 mongodb :27017 ++ local 'command=rs.conf().members.length' ++ egrep -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|bye' ++ local uri=clusterAdmin:clusterAdmin123456@34.118.238.30 ++ local driver=mongodb ++ local suffix=:27017 ++ local mongo_flag= ++ local port=27017 ++ local mongo_bin=mongo ++ /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' +++ echo :27017 egrep: warning: egrep is obsolescent; using grep -E +++ awk -F: '{print $2}' ++ suffix_port=27017 ++ [[ -z 27017 ]] +++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.MNt4Si5zLK ++++ mktemp +++ local LAST_ERR=/tmp/tmp.8A0bNieqBw +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.MNt4Si5zLK +++ cat /tmp/tmp.8A0bNieqBw +++ rm /tmp/tmp.MNt4Si5zLK /tmp/tmp.8A0bNieqBw +++ return 0 ++ local client_container=psmdb-client-696897d69b-pjkvq ++ kubectl_bin exec psmdb-client-696897d69b-pjkvq -- bash -c 'printf '\''rs.conf().members.length\n'\'' | mongo mongodb://clusterAdmin:clusterAdmin123456@34.118.238.30:27017/admin ' +++ mktemp ++ local LAST_OUT=/tmp/tmp.MZYoo3lv6U +++ mktemp ++ local LAST_ERR=/tmp/tmp.jKq8ypy5Tj ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl exec psmdb-client-696897d69b-pjkvq -- bash -c 'printf '\''rs.conf().members.length\n'\'' | mongo mongodb://clusterAdmin:clusterAdmin123456@34.118.238.30:27017/admin ' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.MZYoo3lv6U ++ cat /tmp/tmp.jKq8ypy5Tj ++ rm /tmp/tmp.MZYoo3lv6U /tmp/tmp.jKq8ypy5Tj ++ return 0 + nodes_count=5 + echo -n 'waiting for all members to be configured in cfg' waiting for all members to be configured in cfg+ let retry+=1 + '[' 3 -ge 15 ']' + echo . . 
+ sleep 10 + [[ 5 == 6 ]] ++ run_mongos 'rs.conf().members.length' clusterAdmin:clusterAdmin123456@34.118.238.30 mongodb :27017 ++ egrep -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|bye' ++ local 'command=rs.conf().members.length' ++ local uri=clusterAdmin:clusterAdmin123456@34.118.238.30 ++ local driver=mongodb ++ local suffix=:27017 ++ local mongo_flag= ++ local port=27017 ++ local mongo_bin=mongo ++ /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' +++ echo :27017 +++ awk -F: '{print $2}' egrep: warning: egrep is obsolescent; using grep -E ++ suffix_port=27017 ++ [[ -z 27017 ]] +++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.g1r0E68Ov5 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.pJHGVyH90h +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.g1r0E68Ov5 +++ cat /tmp/tmp.pJHGVyH90h +++ rm /tmp/tmp.g1r0E68Ov5 /tmp/tmp.pJHGVyH90h +++ return 0 ++ local client_container=psmdb-client-696897d69b-pjkvq ++ kubectl_bin exec psmdb-client-696897d69b-pjkvq -- bash -c 'printf '\''rs.conf().members.length\n'\'' | mongo mongodb://clusterAdmin:clusterAdmin123456@34.118.238.30:27017/admin ' +++ mktemp ++ local LAST_OUT=/tmp/tmp.VmamrDgLk7 +++ mktemp ++ local LAST_ERR=/tmp/tmp.53cu6V4JlN ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl exec psmdb-client-696897d69b-pjkvq -- bash -c 'printf '\''rs.conf().members.length\n'\'' | mongo mongodb://clusterAdmin:clusterAdmin123456@34.118.238.30:27017/admin ' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.VmamrDgLk7 ++ cat /tmp/tmp.53cu6V4JlN ++ rm /tmp/tmp.VmamrDgLk7 /tmp/tmp.53cu6V4JlN ++ return 0 + nodes_count=5 + echo -n 'waiting for all members to be configured in cfg' waiting for all members to be configured in cfg+ let retry+=1 + '[' 4 -ge 15 ']' + echo . . 
+ sleep 10 + [[ 5 == 6 ]] ++ run_mongos 'rs.conf().members.length' clusterAdmin:clusterAdmin123456@34.118.238.30 mongodb :27017 ++ local 'command=rs.conf().members.length' ++ local uri=clusterAdmin:clusterAdmin123456@34.118.238.30 ++ local driver=mongodb ++ local suffix=:27017 ++ local mongo_flag= ++ local port=27017 ++ local mongo_bin=mongo ++ egrep -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|bye' ++ /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' +++ echo :27017 +++ awk -F: '{print $2}' egrep: warning: egrep is obsolescent; using grep -E ++ suffix_port=27017 ++ [[ -z 27017 ]] +++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.Jcswf6QSbj ++++ mktemp +++ local LAST_ERR=/tmp/tmp.oQu4Ci1eKh +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.Jcswf6QSbj +++ cat /tmp/tmp.oQu4Ci1eKh +++ rm /tmp/tmp.Jcswf6QSbj /tmp/tmp.oQu4Ci1eKh +++ return 0 ++ local client_container=psmdb-client-696897d69b-pjkvq ++ kubectl_bin exec psmdb-client-696897d69b-pjkvq -- bash -c 'printf '\''rs.conf().members.length\n'\'' | mongo mongodb://clusterAdmin:clusterAdmin123456@34.118.238.30:27017/admin ' +++ mktemp ++ local LAST_OUT=/tmp/tmp.41Hw1ilhiE +++ mktemp ++ local LAST_ERR=/tmp/tmp.Z9nRYXShn0 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl exec psmdb-client-696897d69b-pjkvq -- bash -c 'printf '\''rs.conf().members.length\n'\'' | mongo mongodb://clusterAdmin:clusterAdmin123456@34.118.238.30:27017/admin ' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.41Hw1ilhiE ++ cat /tmp/tmp.Z9nRYXShn0 ++ rm /tmp/tmp.41Hw1ilhiE /tmp/tmp.Z9nRYXShn0 ++ return 0 + nodes_count=5 + echo -n 'waiting for all members to be configured in cfg' waiting for all members to be configured in cfg+ let retry+=1 + '[' 5 -ge 15 ']' + echo . . 
+ sleep 10 + [[ 5 == 6 ]] ++ run_mongos 'rs.conf().members.length' clusterAdmin:clusterAdmin123456@34.118.238.30 mongodb :27017 ++ local 'command=rs.conf().members.length' ++ local uri=clusterAdmin:clusterAdmin123456@34.118.238.30 ++ local driver=mongodb ++ local suffix=:27017 ++ local mongo_flag= ++ local port=27017 ++ local mongo_bin=mongo ++ egrep -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|bye' ++ /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' +++ echo :27017 +++ awk -F: '{print $2}' egrep: warning: egrep is obsolescent; using grep -E ++ suffix_port=27017 ++ [[ -z 27017 ]] +++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.GaHRXOt697 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.cYijY2g9MH +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.GaHRXOt697 +++ cat /tmp/tmp.cYijY2g9MH +++ rm /tmp/tmp.GaHRXOt697 /tmp/tmp.cYijY2g9MH +++ return 0 ++ local client_container=psmdb-client-696897d69b-pjkvq ++ kubectl_bin exec psmdb-client-696897d69b-pjkvq -- bash -c 'printf '\''rs.conf().members.length\n'\'' | mongo mongodb://clusterAdmin:clusterAdmin123456@34.118.238.30:27017/admin ' +++ mktemp ++ local LAST_OUT=/tmp/tmp.TLVEc0AMTU +++ mktemp ++ local LAST_ERR=/tmp/tmp.ojmEUZWqE7 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl exec psmdb-client-696897d69b-pjkvq -- bash -c 'printf '\''rs.conf().members.length\n'\'' | mongo mongodb://clusterAdmin:clusterAdmin123456@34.118.238.30:27017/admin ' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.TLVEc0AMTU ++ cat /tmp/tmp.ojmEUZWqE7 ++ rm /tmp/tmp.TLVEc0AMTU /tmp/tmp.ojmEUZWqE7 ++ return 0 + nodes_count=5 + echo -n 'waiting for all members to be configured in cfg' waiting for all members to be configured in cfg+ let retry+=1 + '[' 6 -ge 15 ']' + echo . . 
+ sleep 10 + [[ 5 == 6 ]] ++ run_mongos 'rs.conf().members.length' clusterAdmin:clusterAdmin123456@34.118.238.30 mongodb :27017 ++ local 'command=rs.conf().members.length' ++ local uri=clusterAdmin:clusterAdmin123456@34.118.238.30 ++ local driver=mongodb ++ local suffix=:27017 ++ local mongo_flag= ++ local port=27017 ++ local mongo_bin=mongo ++ egrep -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|bye' ++ /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' +++ echo :27017 +++ awk -F: '{print $2}' egrep: warning: egrep is obsolescent; using grep -E ++ suffix_port=27017 ++ [[ -z 27017 ]] +++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.yTsBFouFzG ++++ mktemp +++ local LAST_ERR=/tmp/tmp.f7ike6Dj9J +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.yTsBFouFzG +++ cat /tmp/tmp.f7ike6Dj9J +++ rm /tmp/tmp.yTsBFouFzG /tmp/tmp.f7ike6Dj9J +++ return 0 ++ local client_container=psmdb-client-696897d69b-pjkvq ++ kubectl_bin exec psmdb-client-696897d69b-pjkvq -- bash -c 'printf '\''rs.conf().members.length\n'\'' | mongo mongodb://clusterAdmin:clusterAdmin123456@34.118.238.30:27017/admin ' +++ mktemp ++ local LAST_OUT=/tmp/tmp.r0IkFMsLl9 +++ mktemp ++ local LAST_ERR=/tmp/tmp.NVUCD2EQTK ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl exec psmdb-client-696897d69b-pjkvq -- bash -c 'printf '\''rs.conf().members.length\n'\'' | mongo mongodb://clusterAdmin:clusterAdmin123456@34.118.238.30:27017/admin ' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.r0IkFMsLl9 ++ cat /tmp/tmp.NVUCD2EQTK ++ rm /tmp/tmp.r0IkFMsLl9 /tmp/tmp.NVUCD2EQTK ++ return 0 + nodes_count=5 + echo -n 'waiting for all members to be configured in cfg' waiting for all members to be configured in cfg+ let retry+=1 + '[' 7 -ge 15 ']' + echo . . 
+ sleep 10 + [[ 5 == 6 ]] ++ run_mongos 'rs.conf().members.length' clusterAdmin:clusterAdmin123456@34.118.238.30 mongodb :27017 ++ local 'command=rs.conf().members.length' ++ local uri=clusterAdmin:clusterAdmin123456@34.118.238.30 ++ local driver=mongodb ++ local suffix=:27017 ++ local mongo_flag= ++ local port=27017 ++ local mongo_bin=mongo ++ /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ egrep -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|bye' +++ echo :27017 +++ awk -F: '{print $2}' egrep: warning: egrep is obsolescent; using grep -E ++ suffix_port=27017 ++ [[ -z 27017 ]] +++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.jCGZFmQ7J6 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.YjuBThg1Yu +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.jCGZFmQ7J6 +++ cat /tmp/tmp.YjuBThg1Yu +++ rm /tmp/tmp.jCGZFmQ7J6 /tmp/tmp.YjuBThg1Yu +++ return 0 ++ local client_container=psmdb-client-696897d69b-pjkvq ++ kubectl_bin exec psmdb-client-696897d69b-pjkvq -- bash -c 'printf '\''rs.conf().members.length\n'\'' | mongo mongodb://clusterAdmin:clusterAdmin123456@34.118.238.30:27017/admin ' +++ mktemp ++ local LAST_OUT=/tmp/tmp.UaFsaOnS4s +++ mktemp ++ local LAST_ERR=/tmp/tmp.P02S8GsNBV ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl exec psmdb-client-696897d69b-pjkvq -- bash -c 'printf '\''rs.conf().members.length\n'\'' | mongo mongodb://clusterAdmin:clusterAdmin123456@34.118.238.30:27017/admin ' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.UaFsaOnS4s ++ cat /tmp/tmp.P02S8GsNBV ++ rm /tmp/tmp.UaFsaOnS4s /tmp/tmp.P02S8GsNBV ++ return 0 + nodes_count=5 + echo -n 'waiting for all members to be configured in cfg' waiting for all members to be configured in cfg+ let retry+=1 + '[' 8 -ge 15 ']' + echo . . 
+ sleep 10 + [[ 5 == 6 ]] ++ run_mongos 'rs.conf().members.length' clusterAdmin:clusterAdmin123456@34.118.238.30 mongodb :27017 ++ local 'command=rs.conf().members.length' ++ local uri=clusterAdmin:clusterAdmin123456@34.118.238.30 ++ local driver=mongodb ++ local suffix=:27017 ++ local mongo_flag= ++ local port=27017 ++ local mongo_bin=mongo ++ egrep -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|bye' ++ /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' +++ awk -F: '{print $2}' +++ echo :27017 egrep: warning: egrep is obsolescent; using grep -E ++ suffix_port=27017 ++ [[ -z 27017 ]] +++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.N15xBHY6aW ++++ mktemp +++ local LAST_ERR=/tmp/tmp.NOUq13BSEu +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.N15xBHY6aW +++ cat /tmp/tmp.NOUq13BSEu +++ rm /tmp/tmp.N15xBHY6aW /tmp/tmp.NOUq13BSEu +++ return 0 ++ local client_container=psmdb-client-696897d69b-pjkvq ++ kubectl_bin exec psmdb-client-696897d69b-pjkvq -- bash -c 'printf '\''rs.conf().members.length\n'\'' | mongo mongodb://clusterAdmin:clusterAdmin123456@34.118.238.30:27017/admin ' +++ mktemp ++ local LAST_OUT=/tmp/tmp.xREipnbiMN +++ mktemp ++ local LAST_ERR=/tmp/tmp.XghreiQb6y ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl exec psmdb-client-696897d69b-pjkvq -- bash -c 'printf '\''rs.conf().members.length\n'\'' | mongo mongodb://clusterAdmin:clusterAdmin123456@34.118.238.30:27017/admin ' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.xREipnbiMN ++ cat /tmp/tmp.XghreiQb6y ++ rm /tmp/tmp.xREipnbiMN /tmp/tmp.XghreiQb6y ++ return 0 + nodes_count=6 + echo -n 'waiting for all members to be configured in cfg' waiting for all members to be configured in cfg+ let retry+=1 + '[' 9 -ge 15 ']' + echo . . 
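The blocks above are iterations of the test's member-count wait against the cfg replica set: every 10 seconds it pipes rs.conf().members.length through the psmdb-client pod to the replset's exposed endpoint, and it only proceeds once the count reaches the expected 6, i.e. once the replica site's three members have joined the main site's three; after 15 attempts it would give up. In this last iteration nodes_count finally flips from 5 to 6. A minimal sketch of such a wait loop, assuming a psmdb-client Deployment and the clusterAdmin credentials visible in this run (the real run_mongos helper also resolves the client pod by label and scrubs shell banners with grep/sed, omitted here):

    wait_for_members() {
        local endpoint=$1 rs_name=$2 target_count=$3
        local retry=0 nodes_count=0
        while [ "$nodes_count" -ne "$target_count" ]; do
            # ask a member of the replset how many members its config lists
            nodes_count=$(kubectl exec deploy/psmdb-client -- mongo --quiet \
                "mongodb://clusterAdmin:clusterAdmin123456@${endpoint}:27017/admin" \
                --eval 'rs.conf().members.length')
            echo -n "waiting for all members to be configured in ${rs_name}"
            retry=$((retry + 1))
            if [ "$retry" -ge 15 ]; then
                echo "replset ${rs_name} never reached ${target_count} members" >&2
                return 1
            fi
            echo .
            sleep 10
        done
    }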
+ sleep 10 + [[ 6 == 6 ]] + wait_for_members 34.118.237.130 rs0 6 + local endpoint=34.118.237.130 + local rsName=rs0 + local target_count=6 + local nodes_count=0 + [[ 0 == 6 ]] ++ run_mongos 'rs.conf().members.length' clusterAdmin:clusterAdmin123456@34.118.237.130 mongodb :27017 ++ local 'command=rs.conf().members.length' ++ local uri=clusterAdmin:clusterAdmin123456@34.118.237.130 ++ local driver=mongodb ++ local suffix=:27017 ++ local mongo_flag= ++ egrep -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|bye' ++ local port=27017 ++ local mongo_bin=mongo ++ /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' +++ echo :27017 +++ awk -F: '{print $2}' egrep: warning: egrep is obsolescent; using grep -E ++ suffix_port=27017 ++ [[ -z 27017 ]] +++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.UPEvipGjWh ++++ mktemp +++ local LAST_ERR=/tmp/tmp.mgfKDgFT0O +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.UPEvipGjWh +++ cat /tmp/tmp.mgfKDgFT0O +++ rm /tmp/tmp.UPEvipGjWh /tmp/tmp.mgfKDgFT0O +++ return 0 ++ local client_container=psmdb-client-696897d69b-pjkvq ++ kubectl_bin exec psmdb-client-696897d69b-pjkvq -- bash -c 'printf '\''rs.conf().members.length\n'\'' | mongo mongodb://clusterAdmin:clusterAdmin123456@34.118.237.130:27017/admin ' +++ mktemp ++ local LAST_OUT=/tmp/tmp.gw4Z4bNZZX +++ mktemp ++ local LAST_ERR=/tmp/tmp.UomE8Lpruw ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl exec psmdb-client-696897d69b-pjkvq -- bash -c 'printf '\''rs.conf().members.length\n'\'' | mongo mongodb://clusterAdmin:clusterAdmin123456@34.118.237.130:27017/admin ' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.gw4Z4bNZZX ++ cat /tmp/tmp.UomE8Lpruw ++ rm /tmp/tmp.gw4Z4bNZZX /tmp/tmp.UomE8Lpruw ++ return 0 + nodes_count=6 + echo -n 'waiting for all members to be configured in rs0' waiting for all members to be configured in rs0+ let retry+=1 + '[' 10 -ge 15 ']' + echo . . 
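The same wait then repeats for each remaining replica set: rs0 above and rs1 next, each against its own LoadBalancer endpoint, because in this cross-site topology every replset (cfg, rs0, rs1) spans both Kubernetes clusters and must report all six members before the test moves on. As a hypothetical driver for the sketch above (the IPs are this run's per-replset endpoints and will differ between runs):

    wait_for_members 34.118.238.30  cfg 6   # config server replset
    wait_for_members 34.118.237.130 rs0 6   # first shard
    wait_for_members 34.118.237.241 rs1 6   # second shard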
+ sleep 10 + [[ 6 == 6 ]] + wait_for_members 34.118.237.241 rs1 6 + local endpoint=34.118.237.241 + local rsName=rs1 + local target_count=6 + local nodes_count=0 + [[ 0 == 6 ]] ++ run_mongos 'rs.conf().members.length' clusterAdmin:clusterAdmin123456@34.118.237.241 mongodb :27017 ++ local 'command=rs.conf().members.length' ++ local uri=clusterAdmin:clusterAdmin123456@34.118.237.241 ++ local driver=mongodb ++ local suffix=:27017 ++ local mongo_flag= ++ local port=27017 ++ local mongo_bin=mongo ++ /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ egrep -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|bye' +++ echo :27017 +++ awk -F: '{print $2}' egrep: warning: egrep is obsolescent; using grep -E ++ suffix_port=27017 ++ [[ -z 27017 ]] +++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.Vdq3VUmC01 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.vd4jxEObRF +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.Vdq3VUmC01 +++ cat /tmp/tmp.vd4jxEObRF +++ rm /tmp/tmp.Vdq3VUmC01 /tmp/tmp.vd4jxEObRF +++ return 0 ++ local client_container=psmdb-client-696897d69b-pjkvq ++ kubectl_bin exec psmdb-client-696897d69b-pjkvq -- bash -c 'printf '\''rs.conf().members.length\n'\'' | mongo mongodb://clusterAdmin:clusterAdmin123456@34.118.237.241:27017/admin ' +++ mktemp ++ local LAST_OUT=/tmp/tmp.olZL38O1NB +++ mktemp ++ local LAST_ERR=/tmp/tmp.ou8zHyJRVD ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl exec psmdb-client-696897d69b-pjkvq -- bash -c 'printf '\''rs.conf().members.length\n'\'' | mongo mongodb://clusterAdmin:clusterAdmin123456@34.118.237.241:27017/admin ' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.olZL38O1NB ++ cat /tmp/tmp.ou8zHyJRVD ++ rm /tmp/tmp.olZL38O1NB /tmp/tmp.ou8zHyJRVD ++ return 0 + nodes_count=6 + echo -n 'waiting for all members to be configured in rs1' waiting for all members to be configured in rs1+ let retry+=1 + '[' 11 -ge 15 ']' + echo . . + sleep 10 + [[ 6 == 6 ]] ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.On00V09S8r +++ mktemp ++ local LAST_ERR=/tmp/tmp.tai0bv6Esm ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.On00V09S8r ++ cat /tmp/tmp.tai0bv6Esm ++ rm /tmp/tmp.On00V09S8r /tmp/tmp.tai0bv6Esm ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-2109-03bacb5d-1-cluster3 --namespace=cross-site-sharded-replica-519 ++ mktemp + local LAST_OUT=/tmp/tmp.BDxWMoSl1V ++ mktemp + local LAST_ERR=/tmp/tmp.VBAHFPOBIG + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-2109-03bacb5d-1-cluster3 --namespace=cross-site-sharded-replica-519 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.BDxWMoSl1V Context "gke_cloud-dev-112233_us-central1-a_jen-psmdb-2109-03bacb5d-1-cluster3" modified. 
+ cat /tmp/tmp.VBAHFPOBIG + rm /tmp/tmp.BDxWMoSl1V /tmp/tmp.VBAHFPOBIG + return 0 + desc 'check if all 3 Pods started' + set +o xtrace ----------------------------------------------------------------------------------- check if all 3 Pods started ----------------------------------------------------------------------------------- + wait_for_running cross-site-sharded-replica-rs0 3 + local name=cross-site-sharded-replica-rs0 + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=cross-site-sharded-replica ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod cross-site-sharded-replica-rs0-0 + local pod=cross-site-sharded-replica-rs0-0 + set +o xtrace waiting for pod/cross-site-sharded-replica-rs0-0 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod cross-site-sharded-replica-rs0-1 + local pod=cross-site-sharded-replica-rs0-1 + set +o xtrace waiting for pod/cross-site-sharded-replica-rs0-1 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb cross-site-sharded-replica -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Yw3AdsSUwy +++ mktemp ++ local LAST_ERR=/tmp/tmp.yPKaqckYDW ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb cross-site-sharded-replica -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Yw3AdsSUwy ++ cat /tmp/tmp.yPKaqckYDW ++ rm /tmp/tmp.Yw3AdsSUwy /tmp/tmp.yPKaqckYDW ++ return 0 + [[ false == \t\r\u\e ]] + wait_pod cross-site-sharded-replica-rs0-2 + local pod=cross-site-sharded-replica-rs0-2 + set +o xtrace waiting for pod/cross-site-sharded-replica-rs0-2 to be ready.OK ++ kubectl_bin get psmdb cross-site-sharded-replica -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.6oet8fxL6N +++ mktemp ++ local LAST_ERR=/tmp/tmp.0YArg22uWL ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb cross-site-sharded-replica -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.6oet8fxL6N ++ cat /tmp/tmp.0YArg22uWL ++ rm /tmp/tmp.6oet8fxL6N /tmp/tmp.0YArg22uWL ++ return 0 + [[ false == \t\r\u\e ]] ++ kubectl_bin get psmdb cross-site-sharded-replica -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.7gwUdN8rBf +++ mktemp ++ local LAST_ERR=/tmp/tmp.L3CaQsVZSY ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb cross-site-sharded-replica -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.7gwUdN8rBf ++ cat /tmp/tmp.L3CaQsVZSY ++ rm /tmp/tmp.7gwUdN8rBf /tmp/tmp.L3CaQsVZSY ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness + wait_for_running cross-site-sharded-replica-cfg 3 false + local name=cross-site-sharded-replica-cfg + let last_pod=2 + local check_cluster_readyness=false + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=cfg + local cluster_name=cross-site-sharded-replica ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod cross-site-sharded-replica-cfg-0 + local 
pod=cross-site-sharded-replica-cfg-0 + set +o xtrace waiting for pod/cross-site-sharded-replica-cfg-0 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod cross-site-sharded-replica-cfg-1 + local pod=cross-site-sharded-replica-cfg-1 + set +o xtrace waiting for pod/cross-site-sharded-replica-cfg-1 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb cross-site-sharded-replica -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.fkWPKRN49c +++ mktemp ++ local LAST_ERR=/tmp/tmp.a3OrLh4i4T ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb cross-site-sharded-replica -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.fkWPKRN49c ++ cat /tmp/tmp.a3OrLh4i4T ++ rm /tmp/tmp.fkWPKRN49c /tmp/tmp.a3OrLh4i4T ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod cross-site-sharded-replica-cfg-2 + local pod=cross-site-sharded-replica-cfg-2 + set +o xtrace waiting for pod/cross-site-sharded-replica-cfg-2 to be ready.OK ++ kubectl_bin get psmdb cross-site-sharded-replica -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.JRi5ZuT20p +++ mktemp ++ local LAST_ERR=/tmp/tmp.DjizRXv60M ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb cross-site-sharded-replica -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.JRi5ZuT20p ++ cat /tmp/tmp.DjizRXv60M ++ rm /tmp/tmp.JRi5ZuT20p /tmp/tmp.DjizRXv60M ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb cross-site-sharded-replica -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Q0I9NjDjOn +++ mktemp ++ local LAST_ERR=/tmp/tmp.o5gplEtSXn ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb cross-site-sharded-replica -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Q0I9NjDjOn ++ cat /tmp/tmp.o5gplEtSXn ++ rm /tmp/tmp.Q0I9NjDjOn /tmp/tmp.o5gplEtSXn ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ false == \t\r\u\e ]] + desc 'create user' + set +o xtrace ----------------------------------------------------------------------------------- create user ----------------------------------------------------------------------------------- + run_mongos 'db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})' userAdmin:userAdmin123456@cross-site-sharded-main-mongos.cross-site-sharded-5777 + local 'command=db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})' + local uri=userAdmin:userAdmin123456@cross-site-sharded-main-mongos.cross-site-sharded-5777 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local port=27017 + local mongo_bin=mongo ++ echo .svc.cluster.local ++ awk -F: '{print $2}' + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ZMd6JkU8Ry +++ mktemp ++ local LAST_ERR=/tmp/tmp.3il0Xn5uBU ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods 
--selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.ZMd6JkU8Ry ++ cat /tmp/tmp.3il0Xn5uBU ++ rm /tmp/tmp.ZMd6JkU8Ry /tmp/tmp.3il0Xn5uBU ++ return 0 + local client_container=psmdb-client-696897d69b-wg5lg + kubectl_bin exec psmdb-client-696897d69b-wg5lg -- bash -c 'printf '\''db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})\n'\'' | mongo mongodb://userAdmin:userAdmin123456@cross-site-sharded-main-mongos.cross-site-sharded-5777.svc.cluster.local:27017/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.xgfRQ4LyYh ++ mktemp + local LAST_ERR=/tmp/tmp.5yN3iRgx4x + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-wg5lg -- bash -c 'printf '\''db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})\n'\'' | mongo mongodb://userAdmin:userAdmin123456@cross-site-sharded-main-mongos.cross-site-sharded-5777.svc.cluster.local:27017/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.xgfRQ4LyYh Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://cross-site-sharded-main-mongos.cross-site-sharded-5777.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb Implicit session: session { "id" : UUID("2a7741eb-363b-44bf-b249-92b77ade3e8a") } Percona Server for MongoDB server version: v8.0.12-4 WARNING: shell and server versions do not match Successfully added user: { "user" : "myApp", "roles" : [ { "db" : "myApp", "role" : "readWrite" } ] } bye + cat /tmp/tmp.5yN3iRgx4x + rm /tmp/tmp.xgfRQ4LyYh /tmp/tmp.5yN3iRgx4x + return 0 + sleep 2 + desc 'write data, read from all' + set +o xtrace ----------------------------------------------------------------------------------- write data, read from all ----------------------------------------------------------------------------------- + run_mongos 'use myApp\n db.test.insert({ x: 100500 })' myApp:myPass@cross-site-sharded-main-mongos.cross-site-sharded-5777 + local 'command=use myApp\n db.test.insert({ x: 100500 })' + local uri=myApp:myPass@cross-site-sharded-main-mongos.cross-site-sharded-5777 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local port=27017 + local mongo_bin=mongo ++ echo .svc.cluster.local ++ awk -F: '{print $2}' + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.kTEzS89Y53 +++ mktemp ++ local LAST_ERR=/tmp/tmp.BPTJxrKs8k ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.kTEzS89Y53 ++ cat /tmp/tmp.BPTJxrKs8k ++ rm /tmp/tmp.kTEzS89Y53 /tmp/tmp.BPTJxrKs8k ++ return 0 + local client_container=psmdb-client-696897d69b-wg5lg + kubectl_bin exec psmdb-client-696897d69b-wg5lg -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb://myApp:myPass@cross-site-sharded-main-mongos.cross-site-sharded-5777.svc.cluster.local:27017/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.ugu2AfFw1E ++ mktemp + local LAST_ERR=/tmp/tmp.vop6es1pW7 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-wg5lg -- bash -c 'printf '\''use 
myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb://myApp:myPass@cross-site-sharded-main-mongos.cross-site-sharded-5777.svc.cluster.local:27017/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.ugu2AfFw1E Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://cross-site-sharded-main-mongos.cross-site-sharded-5777.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb Implicit session: session { "id" : UUID("c61d51e2-96c8-4eba-8665-59c0ae61143c") } Percona Server for MongoDB server version: v8.0.12-4 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.vop6es1pW7 + rm /tmp/tmp.ugu2AfFw1E /tmp/tmp.vop6es1pW7 + return 0 + minikube_sleep + sleep_time=10 + [[ '' == 1 ]] + desc 'Compare data' + set +o xtrace ----------------------------------------------------------------------------------- Compare data ----------------------------------------------------------------------------------- + compare_mongos_cmd find myApp:myPass@cross-site-sharded-main-mongos.cross-site-sharded-5777 + local command=find + local uri=myApp:myPass@cross-site-sharded-main-mongos.cross-site-sharded-5777 + local postfix= + local suffix= + local database=myApp + local collection=test + local port=27017 + local tls=false + [[ false == \t\r\u\e ]] + mongos_command=run_mongos + run_mongos 'use myApp\n db.test.find()' myApp:myPass@cross-site-sharded-main-mongos.cross-site-sharded-5777 mongodb '' '' 27017 + egrep -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@cross-site-sharded-main-mongos.cross-site-sharded-5777 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local port=27017 + local mongo_bin=mongo + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ echo .svc.cluster.local ++ awk -F: '{print $2}' egrep: warning: egrep is obsolescent; using grep -E + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.OiD9B5MQNS +++ mktemp ++ local LAST_ERR=/tmp/tmp.7BUmDaA4nl ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.OiD9B5MQNS ++ cat /tmp/tmp.7BUmDaA4nl ++ rm /tmp/tmp.OiD9B5MQNS /tmp/tmp.7BUmDaA4nl ++ return 0 + local client_container=psmdb-client-696897d69b-wg5lg + kubectl_bin exec psmdb-client-696897d69b-wg5lg -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@cross-site-sharded-main-mongos.cross-site-sharded-5777.svc.cluster.local:27017/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.veiaozGHgh ++ mktemp + local LAST_ERR=/tmp/tmp.JYTMJycjQm + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-wg5lg -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@cross-site-sharded-main-mongos.cross-site-sharded-5777.svc.cluster.local:27017/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat 
/tmp/tmp.veiaozGHgh + cat /tmp/tmp.JYTMJycjQm + rm /tmp/tmp.veiaozGHgh /tmp/tmp.JYTMJycjQm + return 0 + [[ 0 -eq 0 ]] + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2109/e2e-tests/cross-site-sharded/compare/find.json /tmp/tmp.n7hOMHSJhX/find + desc 'test failover' + set +o xtrace ----------------------------------------------------------------------------------- test failover ----------------------------------------------------------------------------------- ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.BrAIhgzifO +++ mktemp ++ local LAST_ERR=/tmp/tmp.S48du88wQm ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.BrAIhgzifO ++ cat /tmp/tmp.S48du88wQm ++ rm /tmp/tmp.BrAIhgzifO /tmp/tmp.S48du88wQm ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-2109-03bacb5d-1-cluster3 --namespace=cross-site-sharded-5777 ++ mktemp + local LAST_OUT=/tmp/tmp.wjX5O9GMEV ++ mktemp + local LAST_ERR=/tmp/tmp.j2aDnupLSz + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-2109-03bacb5d-1-cluster3 --namespace=cross-site-sharded-5777 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.wjX5O9GMEV Context "gke_cloud-dev-112233_us-central1-a_jen-psmdb-2109-03bacb5d-1-cluster3" modified. + cat /tmp/tmp.j2aDnupLSz + rm /tmp/tmp.wjX5O9GMEV /tmp/tmp.j2aDnupLSz + return 0 + kubectl_bin delete psmdb cross-site-sharded-main ++ mktemp + local LAST_OUT=/tmp/tmp.6IHrRnmUkK ++ mktemp + local LAST_ERR=/tmp/tmp.v96IrnYlTp + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete psmdb cross-site-sharded-main + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.6IHrRnmUkK perconaservermongodb.psmdb.percona.com "cross-site-sharded-main" deleted from cross-site-sharded-5777 namespace + cat /tmp/tmp.v96IrnYlTp + rm /tmp/tmp.6IHrRnmUkK /tmp/tmp.v96IrnYlTp + return 0 + desc 'run disaster recovery script for replset: cfg' + set +o xtrace ----------------------------------------------------------------------------------- run disaster recovery script for replset: cfg ----------------------------------------------------------------------------------- + run_script_mongos /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2109/e2e-tests/cross-site-sharded/disaster_recovery.js clusterAdmin:clusterAdmin123456@34.118.238.30 mongodb :27017 + local script=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2109/e2e-tests/cross-site-sharded/disaster_recovery.js + local uri=clusterAdmin:clusterAdmin123456@34.118.238.30 + local driver=mongodb + local suffix=:27017 + local mongo_flag= + local mongo_bin=mongo ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.FDihYQOxrZ +++ mktemp ++ local LAST_ERR=/tmp/tmp.jGzl8TLSkr ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.FDihYQOxrZ ++ cat /tmp/tmp.jGzl8TLSkr ++ rm /tmp/tmp.FDihYQOxrZ /tmp/tmp.jGzl8TLSkr ++ return 0 + local client_container=psmdb-client-696897d69b-pjkvq ++ basename 
/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2109/e2e-tests/cross-site-sharded/disaster_recovery.js + name=disaster_recovery.js + kubectl_bin cp /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2109/e2e-tests/cross-site-sharded/disaster_recovery.js cross-site-sharded-5777/psmdb-client-696897d69b-pjkvq:/tmp ++ mktemp + local LAST_OUT=/tmp/tmp.EmDcE4PXZ5 ++ mktemp + local LAST_ERR=/tmp/tmp.QvUgjjKAtz + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl cp /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2109/e2e-tests/cross-site-sharded/disaster_recovery.js cross-site-sharded-5777/psmdb-client-696897d69b-pjkvq:/tmp + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.EmDcE4PXZ5 + cat /tmp/tmp.QvUgjjKAtz + rm /tmp/tmp.EmDcE4PXZ5 /tmp/tmp.QvUgjjKAtz + return 0 + kubectl_bin exec psmdb-client-696897d69b-pjkvq -- bash -c 'mongo mongodb://clusterAdmin:clusterAdmin123456@34.118.238.30:27017/admin /tmp/disaster_recovery.js' ++ mktemp + local LAST_OUT=/tmp/tmp.JDUd76v7Tv ++ mktemp + local LAST_ERR=/tmp/tmp.lxwkH8NlCX + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-pjkvq -- bash -c 'mongo mongodb://clusterAdmin:clusterAdmin123456@34.118.238.30:27017/admin /tmp/disaster_recovery.js' + exit_status=253 + set -e + '[' 253 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.JDUd76v7Tv Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://34.118.238.30:27017/admin?compressors=disabled&gssapiServiceName=mongodb Implicit session: session { "id" : UUID("cd7ef135-741c-4d59-8b6e-c4a617e98001") } Percona Server for MongoDB server version: v8.0.12-4 WARNING: shell and server versions do not match uncaught exception: TypeError: member3 is undefined : @/tmp/disaster_recovery.js:12:1 failed to load: /tmp/disaster_recovery.js exiting with code -3 + cat /tmp/tmp.lxwkH8NlCX command terminated with exit code 253 + sleep 0 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-pjkvq -- bash -c 'mongo mongodb://clusterAdmin:clusterAdmin123456@34.118.238.30:27017/admin /tmp/disaster_recovery.js' + exit_status=253 + set -e + '[' 253 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.JDUd76v7Tv Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://34.118.238.30:27017/admin?compressors=disabled&gssapiServiceName=mongodb Implicit session: session { "id" : UUID("8e36fb79-581f-4664-9957-441907e8c2ed") } Percona Server for MongoDB server version: v8.0.12-4 WARNING: shell and server versions do not match uncaught exception: TypeError: member3 is undefined : @/tmp/disaster_recovery.js:12:1 failed to load: /tmp/disaster_recovery.js exiting with code -3 + cat /tmp/tmp.lxwkH8NlCX command terminated with exit code 253 + sleep 4 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-pjkvq -- bash -c 'mongo mongodb://clusterAdmin:clusterAdmin123456@34.118.238.30:27017/admin /tmp/disaster_recovery.js' + exit_status=253 + set -e + '[' 253 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.JDUd76v7Tv Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://34.118.238.30:27017/admin?compressors=disabled&gssapiServiceName=mongodb Implicit session: session { "id" : UUID("6aa83416-2885-43f5-b598-057c54fd78a2") } Percona Server for MongoDB server version: v8.0.12-4 WARNING: shell and server versions do not match uncaught exception: TypeError: member3 is undefined : @/tmp/disaster_recovery.js:12:1 failed to load: /tmp/disaster_recovery.js exiting with code -3 + cat 
/tmp/tmp.lxwkH8NlCX command terminated with exit code 253 + sleep 8 + cat /tmp/tmp.JDUd76v7Tv Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://34.118.238.30:27017/admin?compressors=disabled&gssapiServiceName=mongodb Implicit session: session { "id" : UUID("6aa83416-2885-43f5-b598-057c54fd78a2") } Percona Server for MongoDB server version: v8.0.12-4 WARNING: shell and server versions do not match uncaught exception: TypeError: member3 is undefined : @/tmp/disaster_recovery.js:12:1 failed to load: /tmp/disaster_recovery.js exiting with code -3 + cat /tmp/tmp.lxwkH8NlCX command terminated with exit code 253 + rm /tmp/tmp.JDUd76v7Tv /tmp/tmp.lxwkH8NlCX + return 253
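After the replica site is confirmed healthy and the smoke write is readable through mongos, the failover test deletes the main cluster's PerconaServerMongoDB resource and runs the disaster-recovery script against the replica site's cfg endpoint. run_script_mongos first copies the script into the client pod with kubectl cp and then feeds it to the mongo shell; all three attempts of the retry wrapper die at the script's line 12 with "uncaught exception: TypeError: member3 is undefined", so the wrapper exhausts its retries and returns 253, and the test fails here. In the mongo 4.4 shell (SpiderMonkey) that message typically means a property was read from a variable holding undefined; the usual way a DR script gets there is by indexing rs.conf().members past the end of the array, i.e. the replset has fewer members than the script expects. The script body is not in this log, so the following is only a hypothetical reproduction of that error class, not the actual disaster_recovery.js:

    # hypothetical fragment -- NOT the real e2e-tests/cross-site-sharded/disaster_recovery.js
    kubectl exec psmdb-client-696897d69b-pjkvq -- mongo --quiet \
        'mongodb://clusterAdmin:clusterAdmin123456@34.118.238.30:27017/admin' --eval '
            var cfg = rs.conf();
            var member3 = cfg.members[3];  /* undefined if the replset has < 4 members */
            var host = member3.host;       /* throws: TypeError: member3 is undefined  */
        '

A guard such as "if (cfg.members.length > 3) { ... }" before dereferencing would turn the uncaught exception into a handled condition.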