Log: /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2135/e2e-tests/logs/cross-site-sharded.log grep: warning: stray \ before - Warning: version difference between client (1.35) and server (1.31) exceeds the supported minor version skew of +/-1 Warning: version difference between client (1.35) and server (1.31) exceeds the supported minor version skew of +/-1 Warning: version difference between client (1.35) and server (1.31) exceeds the supported minor version skew of +/-1 ++ get_mongod_ver_from_image perconalab/percona-server-mongodb-operator:main-mongod8.0 ++ local image=perconalab/percona-server-mongodb-operator:main-mongod8.0 +++ run_simple_cli_inside_image perconalab/percona-server-mongodb-operator:main-mongod8.0 'mongod --version' +++ local image=perconalab/percona-server-mongodb-operator:main-mongod8.0 +++ local 'cli=mongod --version' +++ local pod_name=15358 +++ kubectl_bin -n default run 15358 --image=perconalab/percona-server-mongodb-operator:main-mongod8.0 --restart=Never --command -- sleep infinity +++ /usr/sbin/sed -r 's/^.*db version v(([0-9]+\.){2}[0-9]+-[0-9]+).*$/\1/g' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.brDWeO5tpZ ++++ mktemp +++ local LAST_ERR=/tmp/tmp.l27DuX95vY +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl -n default run 15358 --image=perconalab/percona-server-mongodb-operator:main-mongod8.0 --restart=Never --command -- sleep infinity +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.brDWeO5tpZ +++ cat /tmp/tmp.l27DuX95vY +++ rm /tmp/tmp.brDWeO5tpZ /tmp/tmp.l27DuX95vY +++ return 0 +++ kubectl_bin -n default wait --for=condition=Ready pod/15358 ++++ mktemp +++ local LAST_OUT=/tmp/tmp.9rVgqEk0H8 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.piIy7tH3iY +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl -n default wait --for=condition=Ready pod/15358 +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.9rVgqEk0H8 +++ cat /tmp/tmp.piIy7tH3iY +++ rm /tmp/tmp.9rVgqEk0H8 /tmp/tmp.piIy7tH3iY +++ return 0 ++++ kubectl_bin -n default exec 15358 -- bash -c 'mongod --version 2>&1' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.bAXDdd4FSV +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.gBHa4hjLAm ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl -n default exec 15358 -- bash -c 'mongod --version 2>&1' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.bAXDdd4FSV ++++ cat /tmp/tmp.gBHa4hjLAm ++++ rm /tmp/tmp.bAXDdd4FSV /tmp/tmp.gBHa4hjLAm ++++ return 0 +++ local 'output=db version v8.0.16-5 Build Info: { "version": "8.0.16-5", "gitVersion": "f174b291665d4de5c104d07bff06eabe7a913868", "openSSLVersion": "OpenSSL 3.5.1 1 Jul 2025", "modules": [], "perconaFeatures": [ "MemoryEngine", "HotBackup", "BackupCursorAggregationStage", "BackupCursorExtendAggregationStage", "AWSIAM", "Kerberos", "LDAP", "OIDC", "TDE", "FIPSMode", "FCBIS", "Auditing", "ProfilingRateLimit", "LogRedaction", "ngram" ], "allocator": "tcmalloc-google", "environment": { "distarch": "x86_64", "target_arch": "x86_64" } }' +++ kubectl_bin -n default delete pod/15358 --grace-period=0 --force ++++ mktemp +++ local LAST_OUT=/tmp/tmp.MUEKlPNpep ++++ mktemp +++ local LAST_ERR=/tmp/tmp.Ra1OM8OAGX +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl -n default delete pod/15358 --grace-period=0 --force +++ exit_status=0 
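#
# Annotation: the trace above is the throwaway-pod version probe: run a pod with
# `sleep infinity`, wait for Ready, exec `mongod --version`, sed-extract the
# Percona version ("8.0.16-5"), then force-delete the pod. Every kubectl call in
# it goes through the harness's kubectl_bin retry wrapper: stdout/stderr are
# captured to mktemp files, the command is attempted up to three times (the
# 0s/4s/8s backoff is visible later in this log), the captures are replayed, and
# the temp files are removed. A condensed sketch of that behavior (helper name
# and structure are a reconstruction, not the harness's exact code):
#
kubectl_retry_sketch() {
    local out err rc=1 i
    out=$(mktemp) err=$(mktemp)
    for i in 0 1 2; do
        set +e
        kubectl "$@" >"$out" 2>"$err"   # capture both streams per attempt
        rc=$?
        set -e
        [ "$rc" -eq 0 ] && break
        sleep $((i * 4))                # matches the observed 0/4/8 second backoff
    done
    cat "$out"                          # replay captured stdout
    cat "$err" >&2                      # replay captured stderr
    rm -f "$out" "$err"
    return "$rc"
}
#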
+++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.MUEKlPNpep +++ cat /tmp/tmp.Ra1OM8OAGX Warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely. +++ rm /tmp/tmp.MUEKlPNpep /tmp/tmp.Ra1OM8OAGX +++ return 0 +++ echo db version v8.0.16-5 Build Info: '{' '"version":' '"8.0.16-5",' '"gitVersion":' '"f174b291665d4de5c104d07bff06eabe7a913868",' '"openSSLVersion":' '"OpenSSL' 3.5.1 1 Jul '2025",' '"modules":' '[],' '"perconaFeatures":' '[' '"MemoryEngine",' '"HotBackup",' '"BackupCursorAggregationStage",' '"BackupCursorExtendAggregationStage",' '"AWSIAM",' '"Kerberos",' '"LDAP",' '"OIDC",' '"TDE",' '"FIPSMode",' '"FCBIS",' '"Auditing",' '"ProfilingRateLimit",' '"LogRedaction",' '"ngram"' '],' '"allocator":' '"tcmalloc-google",' '"environment":' '{' '"distarch":' '"x86_64",' '"target_arch":' '"x86_64"' '}' '}' ++ version_info=8.0.16-5 ++ [[ ! 8.0.16-5 =~ ^([0-9]+\.){2}[0-9]+-[0-9]+$ ]] ++ echo 8.0.16-5 + FULL_VER=8.0.16-5 + MONGO_VER=8.0 + unset OPERATOR_NS + main_cluster=cross-site-sharded-main + replica_cluster=cross-site-sharded-replica + desc 'create main cluster' + set +o xtrace ----------------------------------------------------------------------------------- create main cluster ----------------------------------------------------------------------------------- + create_infra cross-site-sharded-19485 + local ns=cross-site-sharded-19485 + [[ 1 == 1 ]] + delete_crd + desc 'get and delete old CRDs and RBAC' + set +o xtrace ----------------------------------------------------------------------------------- get and delete old CRDs and RBAC ----------------------------------------------------------------------------------- + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2135/deploy/crd.yaml --ignore-not-found --wait=false ++ mktemp + local LAST_OUT=/tmp/tmp.2NtbvDg1V3 ++ mktemp + local LAST_ERR=/tmp/tmp.NN1CNhJY9O + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2135/deploy/crd.yaml --ignore-not-found --wait=false + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.2NtbvDg1V3 customresourcedefinition.apiextensions.k8s.io "perconaservermongodbbackups.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbrestores.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbs.psmdb.percona.com" deleted + cat /tmp/tmp.NN1CNhJY9O + rm /tmp/tmp.2NtbvDg1V3 /tmp/tmp.NN1CNhJY9O + return 0 ++ yq eval .metadata.name /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2135/deploy/crd.yaml ++ grep -v '\-\-\-' grep: warning: stray \ before - grep: warning: stray \ before - + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + kubectl get perconaservermongodbbackups.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbbackups.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbbackups" + kubectl patch perconaservermongodbbackups.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbbackups" + : + kubectl_bin wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com ++ mktemp + 
local LAST_OUT=/tmp/tmp.bJpvGG4qlW ++ mktemp + local LAST_ERR=/tmp/tmp.XfnDlQhPmw + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.bJpvGG4qlW + cat /tmp/tmp.XfnDlQhPmw + rm /tmp/tmp.bJpvGG4qlW /tmp/tmp.XfnDlQhPmw + return 0 + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + kubectl get perconaservermongodbrestores.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbrestores.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbrestores" + kubectl patch perconaservermongodbrestores.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbrestores" + : + kubectl_bin wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.iKvUZKc8w6 ++ mktemp + local LAST_ERR=/tmp/tmp.FGRCYuXCmS + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.iKvUZKc8w6 + cat /tmp/tmp.FGRCYuXCmS + rm /tmp/tmp.iKvUZKc8w6 /tmp/tmp.FGRCYuXCmS + return 0 + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + kubectl get perconaservermongodbs.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbs.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbs" + kubectl patch perconaservermongodbs.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbs" + : + kubectl_bin wait --for=delete crd perconaservermongodbs.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.7SbIcFxqBH ++ mktemp + local LAST_ERR=/tmp/tmp.g4KBd7t2VA + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbs.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.7SbIcFxqBH + cat /tmp/tmp.g4KBd7t2VA + rm /tmp/tmp.7SbIcFxqBH /tmp/tmp.g4KBd7t2VA + return 0 + local rbac_yaml=rbac.yaml + '[' -n '' ']' + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2135/deploy/rbac.yaml --ignore-not-found ++ mktemp + local LAST_OUT=/tmp/tmp.mLHPiokAav ++ mktemp + local LAST_ERR=/tmp/tmp.eyUnijxMcX + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2135/deploy/rbac.yaml --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.mLHPiokAav role.rbac.authorization.k8s.io "percona-server-mongodb-operator" deleted from cross-site-sharded-24455 namespace serviceaccount "percona-server-mongodb-operator" deleted from cross-site-sharded-24455 namespace rolebinding.rbac.authorization.k8s.io "service-account-percona-server-mongodb-operator" deleted from cross-site-sharded-24455 namespace + cat /tmp/tmp.eyUnijxMcX + rm /tmp/tmp.mLHPiokAav 
/tmp/tmp.eyUnijxMcX + return 0 + check_crd_for_deletion PR-2135-b9c4516c + local git_tag=PR-2135-b9c4516c ++ curl -s https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/PR-2135-b9c4516c/deploy/crd.yaml ++ /usr/sbin/sed s/---//g ++ yq eval .metadata.name ++ /usr/sbin/sed ':a;N;$!ba;s/\n/ /g' + for crd_name in $(curl -s https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/${git_tag}/deploy/crd.yaml | yq eval '.metadata.name' | $sed 's/---//g' | $sed ':a;N;$!ba;s/\n/ /g') ++ kubectl_bin get crd/null -o 'jsonpath={.status.conditions[-1].type}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.D5gRcnKq9c +++ mktemp ++ local LAST_ERR=/tmp/tmp.5u94Lk0dqi ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}' ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.D5gRcnKq9c ++ cat /tmp/tmp.5u94Lk0dqi Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ sleep 0 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}' ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.D5gRcnKq9c ++ cat /tmp/tmp.5u94Lk0dqi Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ sleep 4 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}' ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.D5gRcnKq9c ++ cat /tmp/tmp.5u94Lk0dqi Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ sleep 8 ++ cat /tmp/tmp.D5gRcnKq9c ++ cat /tmp/tmp.5u94Lk0dqi Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ rm /tmp/tmp.D5gRcnKq9c /tmp/tmp.5u94Lk0dqi ++ return 1 + [[ '' == \T\e\r\m\i\n\a\t\i\n\g ]] + '[' -n '' ']' + create_namespace cross-site-sharded-19485 + local namespace=cross-site-sharded-19485 + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + destroy_chaos_mesh ++ helm list --all-namespaces --filter chaos-mesh ++ tail -n1 ++ awk '-F ' '{print $2}' ++ sed s/NAMESPACE// + local chaos_mesh_ns= + desc 'destroy chaos-mesh' + set +o xtrace ----------------------------------------------------------------------------------- destroy chaos-mesh ----------------------------------------------------------------------------------- + '[' -n '' ']' ++ kubectl get MutatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete MutatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep validate-auth ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl api-resources ++ grep chaos-mesh ++ awk '{print $1}' ++ kubectl get crd ++ grep chaos-mesh.org ++ awk '{print $1}' + timeout 30 kubectl delete crd error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrolebinding ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrolebinding error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrole ++ grep 
chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrole error: resource(s) were provided, but no name was specified + : + desc 'cleaned up all old namespaces' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up all old namespaces ----------------------------------------------------------------------------------- + kubectl_bin get ns + egrep -v '^kube-|^default|Terminating|psmdb-operator|openshift|^gke-|^gmp-|^NAME' + awk '{print$1}' + '[' -n '' ']' + desc 'cleaned up old namespaces cross-site-sharded-19485' ++ mktemp + set +o xtrace ----------------------------------------------------------------------------------- cleaned up old namespaces cross-site-sharded-19485 ----------------------------------------------------------------------------------- + kubectl_bin delete namespace cross-site-sharded-19485 --ignore-not-found + xargs kubectl delete ns ++ mktemp egrep: warning: egrep is obsolescent; using grep -E + local LAST_OUT=/tmp/tmp.ygquNLuVF5 ++ mktemp + local LAST_OUT=/tmp/tmp.5GBNz2h7h8 ++ mktemp + local LAST_ERR=/tmp/tmp.4F5W2BnLZf + local exit_status=0 + local timeout=4 + local LAST_ERR=/tmp/tmp.6qauFlMKwE + local exit_status=0 + local timeout=4 ++ seq 0 2 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get ns + for i in $(seq 0 2) + set +e + kubectl delete namespace cross-site-sharded-19485 --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.ygquNLuVF5 + cat /tmp/tmp.4F5W2BnLZf + rm /tmp/tmp.ygquNLuVF5 /tmp/tmp.4F5W2BnLZf + return 0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.5GBNz2h7h8 + cat /tmp/tmp.6qauFlMKwE + rm /tmp/tmp.5GBNz2h7h8 /tmp/tmp.6qauFlMKwE + return 0 + kubectl_bin wait --for=delete namespace cross-site-sharded-19485 ++ mktemp + local LAST_OUT=/tmp/tmp.n7oeCPS1uK ++ mktemp + local LAST_ERR=/tmp/tmp.JgvKM5OUXR + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete namespace cross-site-sharded-19485 namespace "cross-site-sharded-24455" deleted namespace "cross-site-sharded-replica-14868" deleted + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.n7oeCPS1uK + cat /tmp/tmp.JgvKM5OUXR + rm /tmp/tmp.n7oeCPS1uK /tmp/tmp.JgvKM5OUXR + return 0 + desc 'create namespace cross-site-sharded-19485' + set +o xtrace ----------------------------------------------------------------------------------- create namespace cross-site-sharded-19485 ----------------------------------------------------------------------------------- + kubectl_bin create namespace cross-site-sharded-19485 ++ mktemp + local LAST_OUT=/tmp/tmp.BiTG7qBK6i ++ mktemp + local LAST_ERR=/tmp/tmp.zEGohdcAdK + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl create namespace cross-site-sharded-19485 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.BiTG7qBK6i namespace/cross-site-sharded-19485 created + cat /tmp/tmp.zEGohdcAdK + rm /tmp/tmp.BiTG7qBK6i /tmp/tmp.zEGohdcAdK + return 0 + set_kube_ctx cross-site-sharded-19485 + local namespace=cross-site-sharded-19485 ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.mYqaoZcYT5 +++ mktemp ++ local LAST_ERR=/tmp/tmp.EfFc9zg065 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.mYqaoZcYT5 ++ cat /tmp/tmp.EfFc9zg065 
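#
# Annotation: the namespace sweep just above filters system namespaces out of
# `kubectl get ns` and feeds the rest to `kubectl delete ns` via xargs; the
# interleaved temp-file lines appear because the sweep and the delete of the
# target namespace run as two concurrent kubectl_bin calls. A sketch of the
# sweep, assuming the same filter list; grep -E replaces the obsolescent egrep
# the log itself warns about:
#
kubectl get ns \
    | grep -Ev '^kube-|^default|Terminating|psmdb-operator|openshift|^gke-|^gmp-|^NAME' \
    | awk '{print $1}' \
    | xargs -r kubectl delete ns   # -r (GNU xargs): skip the delete when nothing matches
#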
++ rm /tmp/tmp.mYqaoZcYT5 /tmp/tmp.EfFc9zg065 ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-2135-b9c4516c-6-cluster3 --namespace=cross-site-sharded-19485 ++ mktemp + local LAST_OUT=/tmp/tmp.7LLuCz59b8 ++ mktemp + local LAST_ERR=/tmp/tmp.g5ARsHg3hS + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-2135-b9c4516c-6-cluster3 --namespace=cross-site-sharded-19485 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.7LLuCz59b8 Context "gke_cloud-dev-112233_us-central1-a_jen-psmdb-2135-b9c4516c-6-cluster3" modified. + cat /tmp/tmp.g5ARsHg3hS + rm /tmp/tmp.7LLuCz59b8 /tmp/tmp.g5ARsHg3hS + return 0 + deploy_operator + desc 'start PSMDB operator: perconalab/percona-server-mongodb-operator:PR-2135-b9c4516c' + set +o xtrace ----------------------------------------------------------------------------------- start PSMDB operator: perconalab/percona-server-mongodb-operator:PR-2135-b9c4516c ----------------------------------------------------------------------------------- + local cr_file + '[' -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2135/e2e-tests/cross-site-sharded/conf/crd.yaml ']' + cr_file=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2135/deploy/crd.yaml + kubectl_bin apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2135/deploy/crd.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.xu8nfGqfgl ++ mktemp + local LAST_ERR=/tmp/tmp.ng9iwG24Hh + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2135/deploy/crd.yaml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.xu8nfGqfgl customresourcedefinition.apiextensions.k8s.io/perconaservermongodbbackups.psmdb.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaservermongodbrestores.psmdb.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaservermongodbs.psmdb.percona.com serverside-applied + cat /tmp/tmp.ng9iwG24Hh + rm /tmp/tmp.xu8nfGqfgl /tmp/tmp.ng9iwG24Hh + return 0 + '[' -n '' ']' + apply_rbac rbac + local operator_namespace=psmdb-operator + local rbac=rbac + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2135/deploy/rbac.yaml + sed -e 's^namespace: .*^namespace: psmdb-operator^' + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.IwNyuxmkur ++ mktemp + local LAST_ERR=/tmp/tmp.eS8Ne6EP0c + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.IwNyuxmkur role.rbac.authorization.k8s.io/percona-server-mongodb-operator created serviceaccount/percona-server-mongodb-operator created rolebinding.rbac.authorization.k8s.io/service-account-percona-server-mongodb-operator created + cat /tmp/tmp.eS8Ne6EP0c + rm /tmp/tmp.IwNyuxmkur /tmp/tmp.eS8Ne6EP0c + return 0 + yq eval ' (.spec.template.spec.containers[].image = "perconalab/percona-server-mongodb-operator:PR-2135-b9c4516c") | ((.. | select(.[] == "DISABLE_TELEMETRY")) |= .value="true") | ((.. 
| select(.[] == "LOG_LEVEL")) |= .value="DEBUG")' /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2135/deploy/operator.yaml + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.phjFPcIHQc ++ mktemp + local LAST_ERR=/tmp/tmp.nuy3nHm1ss + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.phjFPcIHQc deployment.apps/percona-server-mongodb-operator created + cat /tmp/tmp.nuy3nHm1ss + rm /tmp/tmp.phjFPcIHQc /tmp/tmp.nuy3nHm1ss + return 0 + sleep 20 ++ get_operator_pod ++ kubectl_bin get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.oj1qHHfEPt +++ mktemp ++ local LAST_ERR=/tmp/tmp.CX2GYku4Kc ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.oj1qHHfEPt ++ cat /tmp/tmp.CX2GYku4Kc ++ rm /tmp/tmp.oj1qHHfEPt /tmp/tmp.CX2GYku4Kc ++ return 0 + wait_operator_pod percona-server-mongodb-operator-679bb66968-thlb6 + local pod=percona-server-mongodb-operator-679bb66968-thlb6 + set +o xtrace waiting for pod/percona-server-mongodb-operator-679bb66968-thlb6 to be ready.OK + echo 'Print operator info from log' Print operator info from log + grep 'Manager starting up' ++ get_operator_pod ++ kubectl_bin get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.iw8oPYruZm +++ mktemp ++ local LAST_ERR=/tmp/tmp.4ILoy1sx8e ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.iw8oPYruZm ++ cat /tmp/tmp.4ILoy1sx8e ++ rm /tmp/tmp.iw8oPYruZm /tmp/tmp.4ILoy1sx8e ++ return 0 + kubectl_bin logs percona-server-mongodb-operator-679bb66968-thlb6 ++ mktemp + local LAST_OUT=/tmp/tmp.PxBLuBMGyY ++ mktemp + local LAST_ERR=/tmp/tmp.G8A1a8RrGQ + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl logs percona-server-mongodb-operator-679bb66968-thlb6 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.PxBLuBMGyY + cat /tmp/tmp.G8A1a8RrGQ + rm /tmp/tmp.PxBLuBMGyY /tmp/tmp.G8A1a8RrGQ + return 0 2025-12-23T12:56:07.024Z INFO setup Manager starting up {"gitCommit": "b9c4516cc593d15ff1df09a591d9d45490aaf7c6", "gitBranch": "PR-2135-b9c4516c", "buildTime": "", "goVersion": "go1.25.5", "os": "linux", "arch": "amd64"} + desc 'create secrets and start client' + set +o xtrace ----------------------------------------------------------------------------------- create secrets and start client ----------------------------------------------------------------------------------- + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2135/e2e-tests/conf/client.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2135/e2e-tests/cross-site-sharded/conf/secrets.yml ++ mktemp + local LAST_OUT=/tmp/tmp.xXMdiPztw3 ++ mktemp + local LAST_ERR=/tmp/tmp.6sxKVsbtIl + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2135/e2e-tests/conf/client.yml -f 
/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2135/e2e-tests/cross-site-sharded/conf/secrets.yml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.xXMdiPztw3 deployment.apps/psmdb-client created secret/cross-site-sharded-main-secrets created secret/cross-site-sharded-main-ssl created secret/cross-site-sharded-main-ssl-internal created + cat /tmp/tmp.6sxKVsbtIl + rm /tmp/tmp.xXMdiPztw3 /tmp/tmp.6sxKVsbtIl + return 0 + desc 'create main PSMDB cluster cross-site-sharded-main.' + set +o xtrace ----------------------------------------------------------------------------------- create main PSMDB cluster cross-site-sharded-main. ----------------------------------------------------------------------------------- + apply_cluster /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2135/e2e-tests/cross-site-sharded/conf/cross-site-sharded-main.yml + '[' -z '' ']' + cat_config /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2135/e2e-tests/cross-site-sharded/conf/cross-site-sharded-main.yml + kubectl_bin apply -f - + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2135/e2e-tests/cross-site-sharded/conf/cross-site-sharded-main.yml + yq eval '(.spec | select(.image == null)).image = "perconalab/percona-server-mongodb-operator:main-mongod8.0"' ++ mktemp + yq eval '(.spec | select(has("pmm"))).pmm.image = "percona/pmm-client:2.44.1-1"' + yq eval '(.spec | select(has("backup"))).backup.image = "perconalab/percona-server-mongodb-operator:main-backup"' + /usr/sbin/sed -e s/NAME_SPACE/cross-site-sharded-19485/g + yq eval '.spec.upgradeOptions.apply="Never"' + yq eval '(.spec | select(has("initImage"))).initImage = "perconalab/percona-server-mongodb-operator:PR-2135-b9c4516c"' + local LAST_OUT=/tmp/tmp.7fTsSXvHw2 ++ mktemp + local LAST_ERR=/tmp/tmp.8wHLz0nKGz + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.7fTsSXvHw2 perconaservermongodb.psmdb.percona.com/cross-site-sharded-main created + cat /tmp/tmp.8wHLz0nKGz + rm /tmp/tmp.7fTsSXvHw2 /tmp/tmp.8wHLz0nKGz + return 0 + desc 'check if all 3 Pods started' + set +o xtrace ----------------------------------------------------------------------------------- check if all 3 Pods started ----------------------------------------------------------------------------------- + wait_for_running cross-site-sharded-main-rs0 3 + local name=cross-site-sharded-main-rs0 + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=cross-site-sharded-main ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod cross-site-sharded-main-rs0-0 + local pod=cross-site-sharded-main-rs0-0 + set +o xtrace waiting for pod/cross-site-sharded-main-rs0-0 to be ready..........OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod cross-site-sharded-main-rs0-1 + local pod=cross-site-sharded-main-rs0-1 + set +o xtrace waiting for pod/cross-site-sharded-main-rs0-1 to be ready...........OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb cross-site-sharded-main -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.OZ6jje4A7v +++ mktemp ++ local LAST_ERR=/tmp/tmp.jndoNM5Aag ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb cross-site-sharded-main -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set 
-e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.OZ6jje4A7v ++ cat /tmp/tmp.jndoNM5Aag ++ rm /tmp/tmp.OZ6jje4A7v /tmp/tmp.jndoNM5Aag ++ return 0 + [[ false == \t\r\u\e ]] + wait_pod cross-site-sharded-main-rs0-2 + local pod=cross-site-sharded-main-rs0-2 + set +o xtrace waiting for pod/cross-site-sharded-main-rs0-2 to be ready..........OK ++ kubectl_bin get psmdb cross-site-sharded-main -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.KBQUtqR7ec +++ mktemp ++ local LAST_ERR=/tmp/tmp.DiVBpDta0B ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb cross-site-sharded-main -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.KBQUtqR7ec ++ cat /tmp/tmp.DiVBpDta0B ++ rm /tmp/tmp.KBQUtqR7ec /tmp/tmp.DiVBpDta0B ++ return 0 + [[ false == \t\r\u\e ]] ++ kubectl_bin get psmdb cross-site-sharded-main -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ZtYbw7NOFZ +++ mktemp ++ local LAST_ERR=/tmp/tmp.tov0A5QX9R ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb cross-site-sharded-main -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.ZtYbw7NOFZ ++ cat /tmp/tmp.tov0A5QX9R ++ rm /tmp/tmp.ZtYbw7NOFZ /tmp/tmp.tov0A5QX9R ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness.................................................... + wait_for_running cross-site-sharded-main-cfg 3 false + local name=cross-site-sharded-main-cfg + let last_pod=2 + local check_cluster_readyness=false + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=cfg + local cluster_name=cross-site-sharded-main ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod cross-site-sharded-main-cfg-0 + local pod=cross-site-sharded-main-cfg-0 + set +o xtrace waiting for pod/cross-site-sharded-main-cfg-0 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod cross-site-sharded-main-cfg-1 + local pod=cross-site-sharded-main-cfg-1 + set +o xtrace waiting for pod/cross-site-sharded-main-cfg-1 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb cross-site-sharded-main -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.D7N4iQzpWR +++ mktemp ++ local LAST_ERR=/tmp/tmp.z51zm9ea0p ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb cross-site-sharded-main -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.D7N4iQzpWR ++ cat /tmp/tmp.z51zm9ea0p ++ rm /tmp/tmp.D7N4iQzpWR /tmp/tmp.z51zm9ea0p ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod cross-site-sharded-main-cfg-2 + local pod=cross-site-sharded-main-cfg-2 + set +o xtrace waiting for pod/cross-site-sharded-main-cfg-2 to be ready.OK ++ kubectl_bin get psmdb cross-site-sharded-main -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.4LcoqD9KYl +++ mktemp ++ local LAST_ERR=/tmp/tmp.CaOJOZLR6U ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb cross-site-sharded-main -o 
'jsonpath={.spec.replsets[?(@.name=="cfg")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.4LcoqD9KYl ++ cat /tmp/tmp.CaOJOZLR6U ++ rm /tmp/tmp.4LcoqD9KYl /tmp/tmp.CaOJOZLR6U ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb cross-site-sharded-main -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.67xBPQuE1J +++ mktemp ++ local LAST_ERR=/tmp/tmp.rkCxJPlMEC ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb cross-site-sharded-main -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.67xBPQuE1J ++ cat /tmp/tmp.rkCxJPlMEC ++ rm /tmp/tmp.67xBPQuE1J /tmp/tmp.rkCxJPlMEC ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ false == \t\r\u\e ]] + desc 'create user' + set +o xtrace ----------------------------------------------------------------------------------- create user ----------------------------------------------------------------------------------- + run_mongos 'db.createUser({user:"user",pwd:"pass",roles:[{db:"app",role:"readWrite"}]})' userAdmin:userAdmin123456@cross-site-sharded-main-mongos.cross-site-sharded-19485 + local 'command=db.createUser({user:"user",pwd:"pass",roles:[{db:"app",role:"readWrite"}]})' + local uri=userAdmin:userAdmin123456@cross-site-sharded-main-mongos.cross-site-sharded-19485 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local port=27017 + local mongo_bin=mongo ++ echo .svc.cluster.local ++ awk -F: '{print $2}' + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.XiX3uPQUiM +++ mktemp ++ local LAST_ERR=/tmp/tmp.D1oOczaR8F ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.XiX3uPQUiM ++ cat /tmp/tmp.D1oOczaR8F ++ rm /tmp/tmp.XiX3uPQUiM /tmp/tmp.D1oOczaR8F ++ return 0 + local client_container=psmdb-client-696897d69b-8t86g + kubectl_bin exec psmdb-client-696897d69b-8t86g -- bash -c 'printf '\''db.createUser({user:"user",pwd:"pass",roles:[{db:"app",role:"readWrite"}]})\n'\'' | mongo mongodb://userAdmin:userAdmin123456@cross-site-sharded-main-mongos.cross-site-sharded-19485.svc.cluster.local:27017/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.ly2KUxfJTa ++ mktemp + local LAST_ERR=/tmp/tmp.wkHJTRWWOf + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-8t86g -- bash -c 'printf '\''db.createUser({user:"user",pwd:"pass",roles:[{db:"app",role:"readWrite"}]})\n'\'' | mongo mongodb://userAdmin:userAdmin123456@cross-site-sharded-main-mongos.cross-site-sharded-19485.svc.cluster.local:27017/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.ly2KUxfJTa Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://cross-site-sharded-main-mongos.cross-site-sharded-19485.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb Implicit session: session { "id" : UUID("0c045827-7f13-4b83-abcd-562829731582") } Percona Server for MongoDB server version: v8.0.16-5 WARNING: shell and server versions do not match Successfully 
added user: { "user" : "user", "roles" : [ { "db" : "app", "role" : "readWrite" } ] } bye + cat /tmp/tmp.wkHJTRWWOf + rm /tmp/tmp.ly2KUxfJTa /tmp/tmp.wkHJTRWWOf + return 0 + sleep 2 + desc 'set chunk size to 2 MB' + set +o xtrace ----------------------------------------------------------------------------------- set chunk size to 2 MB ----------------------------------------------------------------------------------- + run_mongos 'use config\n db.settings.save( { _id:"chunksize", value: 2 } )' clusterAdmin:clusterAdmin123456@cross-site-sharded-main-mongos.cross-site-sharded-19485 + local 'command=use config\n db.settings.save( { _id:"chunksize", value: 2 } )' + local uri=clusterAdmin:clusterAdmin123456@cross-site-sharded-main-mongos.cross-site-sharded-19485 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local port=27017 + local mongo_bin=mongo ++ echo .svc.cluster.local ++ awk -F: '{print $2}' + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.2QUHHFs6mi +++ mktemp ++ local LAST_ERR=/tmp/tmp.3bkIRJoQOz ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.2QUHHFs6mi ++ cat /tmp/tmp.3bkIRJoQOz ++ rm /tmp/tmp.2QUHHFs6mi /tmp/tmp.3bkIRJoQOz ++ return 0 + local client_container=psmdb-client-696897d69b-8t86g + kubectl_bin exec psmdb-client-696897d69b-8t86g -- bash -c 'printf '\''use config\n db.settings.save( { _id:"chunksize", value: 2 } )\n'\'' | mongo mongodb://clusterAdmin:clusterAdmin123456@cross-site-sharded-main-mongos.cross-site-sharded-19485.svc.cluster.local:27017/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.kDaEGvCBRn ++ mktemp + local LAST_ERR=/tmp/tmp.hoPfmhOHeo + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-8t86g -- bash -c 'printf '\''use config\n db.settings.save( { _id:"chunksize", value: 2 } )\n'\'' | mongo mongodb://clusterAdmin:clusterAdmin123456@cross-site-sharded-main-mongos.cross-site-sharded-19485.svc.cluster.local:27017/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.kDaEGvCBRn Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://cross-site-sharded-main-mongos.cross-site-sharded-19485.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb Implicit session: session { "id" : UUID("45ee6c99-0538-4684-b8c7-8ba7ce6bc364") } Percona Server for MongoDB server version: v8.0.16-5 WARNING: shell and server versions do not match switched to db config WriteResult({ "nMatched" : 0, "nUpserted" : 1, "nModified" : 0, "_id" : "chunksize" }) bye + cat /tmp/tmp.hoPfmhOHeo + rm /tmp/tmp.kDaEGvCBRn /tmp/tmp.hoPfmhOHeo + return 0 + sleep 2 + desc 'write data' + set +o xtrace ----------------------------------------------------------------------------------- write data ----------------------------------------------------------------------------------- + run_script_mongos /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2135/e2e-tests/cross-site-sharded/data.js user:pass@cross-site-sharded-main-mongos.cross-site-sharded-19485 + local script=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2135/e2e-tests/cross-site-sharded/data.js + local 
uri=user:pass@cross-site-sharded-main-mongos.cross-site-sharded-19485 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local mongo_bin=mongo ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.18YpmBAEzJ +++ mktemp ++ local LAST_ERR=/tmp/tmp.nWo5aC66WR ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.18YpmBAEzJ ++ cat /tmp/tmp.nWo5aC66WR ++ rm /tmp/tmp.18YpmBAEzJ /tmp/tmp.nWo5aC66WR ++ return 0 + local client_container=psmdb-client-696897d69b-8t86g ++ basename /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2135/e2e-tests/cross-site-sharded/data.js + name=data.js + kubectl_bin cp /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2135/e2e-tests/cross-site-sharded/data.js cross-site-sharded-19485/psmdb-client-696897d69b-8t86g:/tmp ++ mktemp + local LAST_OUT=/tmp/tmp.TRvepUkPs3 ++ mktemp + local LAST_ERR=/tmp/tmp.MlGfSelzvc + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl cp /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2135/e2e-tests/cross-site-sharded/data.js cross-site-sharded-19485/psmdb-client-696897d69b-8t86g:/tmp + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.TRvepUkPs3 + cat /tmp/tmp.MlGfSelzvc + rm /tmp/tmp.TRvepUkPs3 /tmp/tmp.MlGfSelzvc + return 0 + kubectl_bin exec psmdb-client-696897d69b-8t86g -- bash -c 'mongo mongodb://user:pass@cross-site-sharded-main-mongos.cross-site-sharded-19485.svc.cluster.local/admin /tmp/data.js' ++ mktemp + local LAST_OUT=/tmp/tmp.eklrRtlsZ3 ++ mktemp + local LAST_ERR=/tmp/tmp.jW4FtY7ch8 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-8t86g -- bash -c 'mongo mongodb://user:pass@cross-site-sharded-main-mongos.cross-site-sharded-19485.svc.cluster.local/admin /tmp/data.js' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.eklrRtlsZ3 Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://cross-site-sharded-main-mongos.cross-site-sharded-19485.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb Implicit session: session { "id" : UUID("4946ee99-0501-43dd-b904-e7e01ee4be77") } Percona Server for MongoDB server version: v8.0.16-5 WARNING: shell and server versions do not match + cat /tmp/tmp.jW4FtY7ch8 + rm /tmp/tmp.eklrRtlsZ3 /tmp/tmp.jW4FtY7ch8 + return 0 + desc 'shard collection' + set +o xtrace ----------------------------------------------------------------------------------- shard collection ----------------------------------------------------------------------------------- + run_mongos 'sh.enableSharding("app")' clusterAdmin:clusterAdmin123456@cross-site-sharded-main-mongos.cross-site-sharded-19485 + local 'command=sh.enableSharding("app")' + local uri=clusterAdmin:clusterAdmin123456@cross-site-sharded-main-mongos.cross-site-sharded-19485 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local port=27017 + local mongo_bin=mongo ++ echo .svc.cluster.local ++ awk -F: '{print $2}' + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.iHwREFSIlZ +++ mktemp ++ local 
LAST_ERR=/tmp/tmp.sQun1gL3tr ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.iHwREFSIlZ ++ cat /tmp/tmp.sQun1gL3tr ++ rm /tmp/tmp.iHwREFSIlZ /tmp/tmp.sQun1gL3tr ++ return 0 + local client_container=psmdb-client-696897d69b-8t86g + kubectl_bin exec psmdb-client-696897d69b-8t86g -- bash -c 'printf '\''sh.enableSharding("app")\n'\'' | mongo mongodb://clusterAdmin:clusterAdmin123456@cross-site-sharded-main-mongos.cross-site-sharded-19485.svc.cluster.local:27017/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.ScCT0IfL38 ++ mktemp + local LAST_ERR=/tmp/tmp.pm3UHpZF2L + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-8t86g -- bash -c 'printf '\''sh.enableSharding("app")\n'\'' | mongo mongodb://clusterAdmin:clusterAdmin123456@cross-site-sharded-main-mongos.cross-site-sharded-19485.svc.cluster.local:27017/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.ScCT0IfL38 Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://cross-site-sharded-main-mongos.cross-site-sharded-19485.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb Implicit session: session { "id" : UUID("f59bd55e-587f-437c-a308-208f3fa72fe1") } Percona Server for MongoDB server version: v8.0.16-5 WARNING: shell and server versions do not match { "ok" : 1, "$clusterTime" : { "clusterTime" : Timestamp(1766494844, 1), "signature" : { "hash" : BinData(0,"Cx17vCwj5e+5LI7IAnkVIMe1hSo="), "keyId" : NumberLong("7587036814733475853") } }, "operationTime" : Timestamp(1766494844, 1) } bye + cat /tmp/tmp.pm3UHpZF2L + rm /tmp/tmp.ScCT0IfL38 /tmp/tmp.pm3UHpZF2L + return 0 + sleep 2 + run_mongos 'sh.shardCollection("app.city", { _id: 1 } )' clusterAdmin:clusterAdmin123456@cross-site-sharded-main-mongos.cross-site-sharded-19485 + local 'command=sh.shardCollection("app.city", { _id: 1 } )' + local uri=clusterAdmin:clusterAdmin123456@cross-site-sharded-main-mongos.cross-site-sharded-19485 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local port=27017 + local mongo_bin=mongo ++ echo .svc.cluster.local ++ awk -F: '{print $2}' + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Ed9Mnnz4Ju +++ mktemp ++ local LAST_ERR=/tmp/tmp.oqhACNe8iz ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Ed9Mnnz4Ju ++ cat /tmp/tmp.oqhACNe8iz ++ rm /tmp/tmp.Ed9Mnnz4Ju /tmp/tmp.oqhACNe8iz ++ return 0 + local client_container=psmdb-client-696897d69b-8t86g + kubectl_bin exec psmdb-client-696897d69b-8t86g -- bash -c 'printf '\''sh.shardCollection("app.city", { _id: 1 } )\n'\'' | mongo mongodb://clusterAdmin:clusterAdmin123456@cross-site-sharded-main-mongos.cross-site-sharded-19485.svc.cluster.local:27017/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.DlhwqLpg1g ++ mktemp + local LAST_ERR=/tmp/tmp.Ry4w0QxeCu + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-8t86g -- bash -c 'printf 
'\''sh.shardCollection("app.city", { _id: 1 } )\n'\'' | mongo mongodb://clusterAdmin:clusterAdmin123456@cross-site-sharded-main-mongos.cross-site-sharded-19485.svc.cluster.local:27017/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.DlhwqLpg1g Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://cross-site-sharded-main-mongos.cross-site-sharded-19485.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb Implicit session: session { "id" : UUID("5fb5e814-f231-4c53-9163-bf881248ceb6") } Percona Server for MongoDB server version: v8.0.16-5 WARNING: shell and server versions do not match { "collectionsharded" : "app.city", "ok" : 1, "$clusterTime" : { "clusterTime" : Timestamp(1766494849, 20), "signature" : { "hash" : BinData(0,"H6XUFqv+xfFqj7VkHNOO+tXJmY4="), "keyId" : NumberLong("7587036814733475853") } }, "operationTime" : Timestamp(1766494849, 19) } bye + cat /tmp/tmp.Ry4w0QxeCu + rm /tmp/tmp.DlhwqLpg1g /tmp/tmp.Ry4w0QxeCu + return 0 + sleep 120 + desc 'Check chunks' + set +o xtrace ----------------------------------------------------------------------------------- Check chunks ----------------------------------------------------------------------------------- + chunks_param1=ns + chunks_param2='"app.city"' + [[ 8.0 != \4\.\4 ]] + chunks_param1=uuid ++ grep 'switched to db app' -A 1 ++ run_mongos 'use app\n db.getCollectionInfos({ "name": "city" })[0].info.uuid' user:pass@cross-site-sharded-main-mongos.cross-site-sharded-19485 ++ local 'command=use app\n db.getCollectionInfos({ "name": "city" })[0].info.uuid' ++ local uri=user:pass@cross-site-sharded-main-mongos.cross-site-sharded-19485 ++ local driver=mongodb ++ local suffix=.svc.cluster.local ++ local mongo_flag= ++ local port=27017 ++ grep -v 'switched to db app' ++ local mongo_bin=mongo +++ echo .svc.cluster.local +++ awk -F: '{print $2}' ++ suffix_port= ++ [[ -z '' ]] ++ suffix=.svc.cluster.local:27017 +++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.4f98zixSfy ++++ mktemp +++ local LAST_ERR=/tmp/tmp.SNbtSI3fPx +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.4f98zixSfy +++ cat /tmp/tmp.SNbtSI3fPx +++ rm /tmp/tmp.4f98zixSfy /tmp/tmp.SNbtSI3fPx +++ return 0 ++ local client_container=psmdb-client-696897d69b-8t86g ++ kubectl_bin exec psmdb-client-696897d69b-8t86g -- bash -c 'printf '\''use app\n db.getCollectionInfos({ "name": "city" })[0].info.uuid\n'\'' | mongo mongodb://user:pass@cross-site-sharded-main-mongos.cross-site-sharded-19485.svc.cluster.local:27017/admin ' +++ mktemp ++ local LAST_OUT=/tmp/tmp.un8H2kY7WA +++ mktemp ++ local LAST_ERR=/tmp/tmp.SbUP6tKhFC ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl exec psmdb-client-696897d69b-8t86g -- bash -c 'printf '\''use app\n db.getCollectionInfos({ "name": "city" })[0].info.uuid\n'\'' | mongo mongodb://user:pass@cross-site-sharded-main-mongos.cross-site-sharded-19485.svc.cluster.local:27017/admin ' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.un8H2kY7WA ++ cat /tmp/tmp.SbUP6tKhFC ++ rm /tmp/tmp.un8H2kY7WA /tmp/tmp.SbUP6tKhFC ++ return 0 + chunks_param2='UUID("b450ce39-8f79-441b-8f3d-199592a03f07")' + shards=0 + for i in "rs0" 
"rs1" ++ run_mongos 'use config\n db.chunks.count({"uuid": UUID("b450ce39-8f79-441b-8f3d-199592a03f07"), "shard": "rs0"})' clusterAdmin:clusterAdmin123456@cross-site-sharded-main-mongos.cross-site-sharded-19485 ++ local 'command=use config\n db.chunks.count({"uuid": UUID("b450ce39-8f79-441b-8f3d-199592a03f07"), "shard": "rs0"})' ++ local uri=clusterAdmin:clusterAdmin123456@cross-site-sharded-main-mongos.cross-site-sharded-19485 ++ grep 'switched to db config' -A 1 ++ local driver=mongodb ++ local suffix=.svc.cluster.local ++ local mongo_flag= ++ local port=27017 ++ local mongo_bin=mongo ++ grep -v 'switched to db config' +++ echo .svc.cluster.local +++ awk -F: '{print $2}' ++ suffix_port= ++ [[ -z '' ]] ++ suffix=.svc.cluster.local:27017 +++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.GSzBpWUFrN ++++ mktemp +++ local LAST_ERR=/tmp/tmp.s5gPHLpEEz +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.GSzBpWUFrN +++ cat /tmp/tmp.s5gPHLpEEz +++ rm /tmp/tmp.GSzBpWUFrN /tmp/tmp.s5gPHLpEEz +++ return 0 ++ local client_container=psmdb-client-696897d69b-8t86g ++ kubectl_bin exec psmdb-client-696897d69b-8t86g -- bash -c 'printf '\''use config\n db.chunks.count({"uuid": UUID("b450ce39-8f79-441b-8f3d-199592a03f07"), "shard": "rs0"})\n'\'' | mongo mongodb://clusterAdmin:clusterAdmin123456@cross-site-sharded-main-mongos.cross-site-sharded-19485.svc.cluster.local:27017/admin ' +++ mktemp ++ local LAST_OUT=/tmp/tmp.YWm9LZw4QR +++ mktemp ++ local LAST_ERR=/tmp/tmp.FROo8Cgfyh ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl exec psmdb-client-696897d69b-8t86g -- bash -c 'printf '\''use config\n db.chunks.count({"uuid": UUID("b450ce39-8f79-441b-8f3d-199592a03f07"), "shard": "rs0"})\n'\'' | mongo mongodb://clusterAdmin:clusterAdmin123456@cross-site-sharded-main-mongos.cross-site-sharded-19485.svc.cluster.local:27017/admin ' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.YWm9LZw4QR ++ cat /tmp/tmp.FROo8Cgfyh ++ rm /tmp/tmp.YWm9LZw4QR /tmp/tmp.FROo8Cgfyh ++ return 0 + out=3 + desc 'rs0 has 3 chunks' + set +o xtrace ----------------------------------------------------------------------------------- rs0 has 3 chunks ----------------------------------------------------------------------------------- + [[ 3 -ne 0 ]] + (( shards = shards + 1 )) + for i in "rs0" "rs1" ++ run_mongos 'use config\n db.chunks.count({"uuid": UUID("b450ce39-8f79-441b-8f3d-199592a03f07"), "shard": "rs1"})' clusterAdmin:clusterAdmin123456@cross-site-sharded-main-mongos.cross-site-sharded-19485 ++ local 'command=use config\n db.chunks.count({"uuid": UUID("b450ce39-8f79-441b-8f3d-199592a03f07"), "shard": "rs1"})' ++ local uri=clusterAdmin:clusterAdmin123456@cross-site-sharded-main-mongos.cross-site-sharded-19485 ++ local driver=mongodb ++ local suffix=.svc.cluster.local ++ local mongo_flag= ++ local port=27017 ++ local mongo_bin=mongo ++ grep 'switched to db config' -A 1 ++ grep -v 'switched to db config' +++ echo .svc.cluster.local +++ awk -F: '{print $2}' ++ suffix_port= ++ [[ -z '' ]] ++ suffix=.svc.cluster.local:27017 +++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.SrudpJ4yxt ++++ 
mktemp +++ local LAST_ERR=/tmp/tmp.GTW8AtAYq4 +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.SrudpJ4yxt +++ cat /tmp/tmp.GTW8AtAYq4 +++ rm /tmp/tmp.SrudpJ4yxt /tmp/tmp.GTW8AtAYq4 +++ return 0 ++ local client_container=psmdb-client-696897d69b-8t86g ++ kubectl_bin exec psmdb-client-696897d69b-8t86g -- bash -c 'printf '\''use config\n db.chunks.count({"uuid": UUID("b450ce39-8f79-441b-8f3d-199592a03f07"), "shard": "rs1"})\n'\'' | mongo mongodb://clusterAdmin:clusterAdmin123456@cross-site-sharded-main-mongos.cross-site-sharded-19485.svc.cluster.local:27017/admin ' +++ mktemp ++ local LAST_OUT=/tmp/tmp.qpUMQRHx6o +++ mktemp ++ local LAST_ERR=/tmp/tmp.bU9Vqpck65 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl exec psmdb-client-696897d69b-8t86g -- bash -c 'printf '\''use config\n db.chunks.count({"uuid": UUID("b450ce39-8f79-441b-8f3d-199592a03f07"), "shard": "rs1"})\n'\'' | mongo mongodb://clusterAdmin:clusterAdmin123456@cross-site-sharded-main-mongos.cross-site-sharded-19485.svc.cluster.local:27017/admin ' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.qpUMQRHx6o ++ cat /tmp/tmp.bU9Vqpck65 ++ rm /tmp/tmp.qpUMQRHx6o /tmp/tmp.bU9Vqpck65 ++ return 0 + out=1 + desc 'rs1 has 1 chunks' + set +o xtrace ----------------------------------------------------------------------------------- rs1 has 1 chunks ----------------------------------------------------------------------------------- + [[ 1 -ne 0 ]] + (( shards = shards + 1 )) + [[ 2 -lt 2 ]] + desc 'create replica cluster' + set +o xtrace ----------------------------------------------------------------------------------- create replica cluster ----------------------------------------------------------------------------------- + create_namespace cross-site-sharded-replica-25250 0 + local namespace=cross-site-sharded-replica-25250 + local skip_clean_namespace=0 + [[ 1 == 1 ]] + [[ -z 0 ]] + '[' -n '' ']' + desc 'cleaned up old namespaces cross-site-sharded-replica-25250' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up old namespaces cross-site-sharded-replica-25250 ----------------------------------------------------------------------------------- + kubectl_bin delete namespace cross-site-sharded-replica-25250 --ignore-not-found ++ mktemp + local LAST_OUT=/tmp/tmp.s2FYNBM147 ++ mktemp + local LAST_ERR=/tmp/tmp.EYP51XJnTZ + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete namespace cross-site-sharded-replica-25250 --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.s2FYNBM147 + cat /tmp/tmp.EYP51XJnTZ + rm /tmp/tmp.s2FYNBM147 /tmp/tmp.EYP51XJnTZ + return 0 + kubectl_bin wait --for=delete namespace cross-site-sharded-replica-25250 ++ mktemp + local LAST_OUT=/tmp/tmp.ts2jAaajpb ++ mktemp + local LAST_ERR=/tmp/tmp.jXGVsgXFIW + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete namespace cross-site-sharded-replica-25250 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.ts2jAaajpb + cat /tmp/tmp.jXGVsgXFIW + rm /tmp/tmp.ts2jAaajpb /tmp/tmp.jXGVsgXFIW + return 0 + desc 'create namespace cross-site-sharded-replica-25250' + set +o 
xtrace ----------------------------------------------------------------------------------- create namespace cross-site-sharded-replica-25250 ----------------------------------------------------------------------------------- + kubectl_bin create namespace cross-site-sharded-replica-25250 ++ mktemp + local LAST_OUT=/tmp/tmp.d9SFSCui0l ++ mktemp + local LAST_ERR=/tmp/tmp.CjBWSSG43m + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl create namespace cross-site-sharded-replica-25250 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.d9SFSCui0l namespace/cross-site-sharded-replica-25250 created + cat /tmp/tmp.CjBWSSG43m + rm /tmp/tmp.d9SFSCui0l /tmp/tmp.CjBWSSG43m + return 0 + set_kube_ctx cross-site-sharded-replica-25250 + local namespace=cross-site-sharded-replica-25250 ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.opUSwmpurs +++ mktemp ++ local LAST_ERR=/tmp/tmp.n96wqSYHMA ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.opUSwmpurs ++ cat /tmp/tmp.n96wqSYHMA ++ rm /tmp/tmp.opUSwmpurs /tmp/tmp.n96wqSYHMA ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-2135-b9c4516c-6-cluster3 --namespace=cross-site-sharded-replica-25250 ++ mktemp + local LAST_OUT=/tmp/tmp.XYPSeSvUIM ++ mktemp + local LAST_ERR=/tmp/tmp.L7iT89ld4n + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-2135-b9c4516c-6-cluster3 --namespace=cross-site-sharded-replica-25250 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.XYPSeSvUIM Context "gke_cloud-dev-112233_us-central1-a_jen-psmdb-2135-b9c4516c-6-cluster3" modified. 
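#
# Annotation: set_kube_ctx switches only the namespace of the current GKE
# context, so the main and replica clusters share one Kubernetes cluster under
# different namespaces; deploy_operator below then installs a second,
# namespace-scoped operator for the replica side. The equivalent two-liner,
# assuming the current context already points at the target cluster:
#
ctx=$(kubectl config current-context)
kubectl config set-context "$ctx" --namespace="cross-site-sharded-replica-25250"
#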
+ cat /tmp/tmp.L7iT89ld4n + rm /tmp/tmp.XYPSeSvUIM /tmp/tmp.L7iT89ld4n + return 0 + deploy_operator + desc 'start PSMDB operator: perconalab/percona-server-mongodb-operator:PR-2135-b9c4516c' + set +o xtrace ----------------------------------------------------------------------------------- start PSMDB operator: perconalab/percona-server-mongodb-operator:PR-2135-b9c4516c ----------------------------------------------------------------------------------- + local cr_file + '[' -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2135/e2e-tests/cross-site-sharded/conf/crd.yaml ']' + cr_file=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2135/deploy/crd.yaml + kubectl_bin apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2135/deploy/crd.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.pjGAXn5ZNL ++ mktemp + local LAST_ERR=/tmp/tmp.ExCbMMgP74 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2135/deploy/crd.yaml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.pjGAXn5ZNL customresourcedefinition.apiextensions.k8s.io/perconaservermongodbbackups.psmdb.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaservermongodbrestores.psmdb.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaservermongodbs.psmdb.percona.com serverside-applied + cat /tmp/tmp.ExCbMMgP74 + rm /tmp/tmp.pjGAXn5ZNL /tmp/tmp.ExCbMMgP74 + return 0 + '[' -n '' ']' + apply_rbac rbac + local operator_namespace=psmdb-operator + local rbac=rbac + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2135/deploy/rbac.yaml + sed -e 's^namespace: .*^namespace: psmdb-operator^' + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.LeM9nUWfm5 ++ mktemp + local LAST_ERR=/tmp/tmp.Bfj9QophT8 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.LeM9nUWfm5 role.rbac.authorization.k8s.io/percona-server-mongodb-operator created serviceaccount/percona-server-mongodb-operator created rolebinding.rbac.authorization.k8s.io/service-account-percona-server-mongodb-operator created + cat /tmp/tmp.Bfj9QophT8 + rm /tmp/tmp.LeM9nUWfm5 /tmp/tmp.Bfj9QophT8 + return 0 + yq eval ' (.spec.template.spec.containers[].image = "perconalab/percona-server-mongodb-operator:PR-2135-b9c4516c") | ((.. | select(.[] == "DISABLE_TELEMETRY")) |= .value="true") | ((.. 
| select(.[] == "LOG_LEVEL")) |= .value="DEBUG")' /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2135/deploy/operator.yaml + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.vChzs6snH7 ++ mktemp + local LAST_ERR=/tmp/tmp.ey8Cz9Yahq + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.vChzs6snH7 deployment.apps/percona-server-mongodb-operator created + cat /tmp/tmp.ey8Cz9Yahq + rm /tmp/tmp.vChzs6snH7 /tmp/tmp.ey8Cz9Yahq + return 0 + sleep 20 ++ get_operator_pod ++ kubectl_bin get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.cGaHUMyQwc +++ mktemp ++ local LAST_ERR=/tmp/tmp.Hlw9uiNRo4 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.cGaHUMyQwc ++ cat /tmp/tmp.Hlw9uiNRo4 ++ rm /tmp/tmp.cGaHUMyQwc /tmp/tmp.Hlw9uiNRo4 ++ return 0 + wait_operator_pod percona-server-mongodb-operator-679bb66968-dxlf8 + local pod=percona-server-mongodb-operator-679bb66968-dxlf8 + set +o xtrace waiting for pod/percona-server-mongodb-operator-679bb66968-dxlf8 to be ready.OK + echo 'Print operator info from log' Print operator info from log + grep 'Manager starting up' ++ get_operator_pod ++ kubectl_bin get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.BmtnJnqwRO +++ mktemp ++ local LAST_ERR=/tmp/tmp.yhWbS4UKuU ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.BmtnJnqwRO ++ cat /tmp/tmp.yhWbS4UKuU ++ rm /tmp/tmp.BmtnJnqwRO /tmp/tmp.yhWbS4UKuU ++ return 0 + kubectl_bin logs percona-server-mongodb-operator-679bb66968-dxlf8 ++ mktemp + local LAST_OUT=/tmp/tmp.jcpFo1ipBn ++ mktemp + local LAST_ERR=/tmp/tmp.mU86Pt8FKl + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl logs percona-server-mongodb-operator-679bb66968-dxlf8 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.jcpFo1ipBn + cat /tmp/tmp.mU86Pt8FKl + rm /tmp/tmp.jcpFo1ipBn /tmp/tmp.mU86Pt8FKl + return 0 2025-12-23T13:03:10.304Z INFO setup Manager starting up {"gitCommit": "b9c4516cc593d15ff1df09a591d9d45490aaf7c6", "gitBranch": "PR-2135-b9c4516c", "buildTime": "", "goVersion": "go1.25.5", "os": "linux", "arch": "amd64"} + desc 'start client' + set +o xtrace ----------------------------------------------------------------------------------- start client ----------------------------------------------------------------------------------- + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2135/e2e-tests/conf/client.yml ++ mktemp + local LAST_OUT=/tmp/tmp.6xiczFKssH ++ mktemp + local LAST_ERR=/tmp/tmp.wMqvFNxZY1 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2135/e2e-tests/conf/client.yml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.6xiczFKssH deployment.apps/psmdb-client created + cat /tmp/tmp.wMqvFNxZY1 + rm 
/tmp/tmp.6xiczFKssH /tmp/tmp.wMqvFNxZY1 + return 0 + desc 'copy secrets from main to replica namespace and create all of them' + set +o xtrace ----------------------------------------------------------------------------------- copy secrets from main to replica namespace and create all of them ----------------------------------------------------------------------------------- + kubectl get secret cross-site-sharded-main-secrets -o yaml -n cross-site-sharded-19485 + yq eval ' del(.metadata) | (.metadata.name = "cross-site-sharded-replica-secrets")' - + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.hFj4UPrxJf ++ mktemp + local LAST_ERR=/tmp/tmp.jb4OCDWRry + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.hFj4UPrxJf secret/cross-site-sharded-replica-secrets created + cat /tmp/tmp.jb4OCDWRry + rm /tmp/tmp.hFj4UPrxJf /tmp/tmp.jb4OCDWRry + return 0 + kubectl_bin get secret cross-site-sharded-main-ssl-internal -o yaml -n cross-site-sharded-19485 + yq eval ' del(.metadata) | del(.status) | (.metadata.name = "cross-site-sharded-replica-ssl-internal")' - + kubectl_bin apply -f - ++ mktemp ++ mktemp + local LAST_OUT=/tmp/tmp.4AQn3qbc83 ++ mktemp + local LAST_OUT=/tmp/tmp.zjHQm5I940 ++ mktemp + local LAST_ERR=/tmp/tmp.SCOvWemz9j + local exit_status=0 + local timeout=4 ++ seq 0 2 + local LAST_ERR=/tmp/tmp.6YLvQdkDOp + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get secret cross-site-sharded-main-ssl-internal -o yaml -n cross-site-sharded-19485 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.4AQn3qbc83 + cat /tmp/tmp.SCOvWemz9j + rm /tmp/tmp.4AQn3qbc83 /tmp/tmp.SCOvWemz9j + return 0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.zjHQm5I940 secret/cross-site-sharded-replica-ssl-internal created + cat /tmp/tmp.6YLvQdkDOp + rm /tmp/tmp.zjHQm5I940 /tmp/tmp.6YLvQdkDOp + return 0 + kubectl_bin get secret cross-site-sharded-main-ssl -o yaml -n cross-site-sharded-19485 + yq eval ' del(.metadata) | del(.status) | (.metadata.name = "cross-site-sharded-replica-ssl")' - + kubectl_bin apply -f - ++ mktemp ++ mktemp + local LAST_OUT=/tmp/tmp.x2LWuxpqPz + local LAST_OUT=/tmp/tmp.hSWJf7u9JE ++ mktemp ++ mktemp + local LAST_ERR=/tmp/tmp.IwpjjYmkpi + local exit_status=0 + local timeout=4 ++ seq 0 2 + local LAST_ERR=/tmp/tmp.xUtioHASL2 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get secret cross-site-sharded-main-ssl -o yaml -n cross-site-sharded-19485 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.x2LWuxpqPz + cat /tmp/tmp.IwpjjYmkpi + rm /tmp/tmp.x2LWuxpqPz /tmp/tmp.IwpjjYmkpi + return 0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.hSWJf7u9JE secret/cross-site-sharded-replica-ssl created + cat /tmp/tmp.xUtioHASL2 + rm /tmp/tmp.hSWJf7u9JE /tmp/tmp.xUtioHASL2 + return 0 + sleep 30 + desc 'create replica PSMDB cluster ' + set +o xtrace ----------------------------------------------------------------------------------- create replica PSMDB cluster ----------------------------------------------------------------------------------- + apply_cluster /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2135/e2e-tests/cross-site-sharded/conf/cross-site-sharded-replica.yml + '[' -z '' ']' + 
cat_config /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2135/e2e-tests/cross-site-sharded/conf/cross-site-sharded-replica.yml + kubectl_bin apply -f - + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2135/e2e-tests/cross-site-sharded/conf/cross-site-sharded-replica.yml ++ mktemp + yq eval '(.spec | select(.image == null)).image = "perconalab/percona-server-mongodb-operator:main-mongod8.0"' + yq eval '(.spec | select(has("pmm"))).pmm.image = "percona/pmm-client:2.44.1-1"' + yq eval '(.spec | select(has("backup"))).backup.image = "perconalab/percona-server-mongodb-operator:main-backup"' + yq eval '(.spec | select(has("initImage"))).initImage = "perconalab/percona-server-mongodb-operator:PR-2135-b9c4516c"' + /usr/sbin/sed -e s/NAME_SPACE/cross-site-sharded-19485/g + yq eval '.spec.upgradeOptions.apply="Never"' + local LAST_OUT=/tmp/tmp.H2HyZV1Hp6 ++ mktemp + local LAST_ERR=/tmp/tmp.tBbXR0RlWC + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.H2HyZV1Hp6 perconaservermongodb.psmdb.percona.com/cross-site-sharded-replica created + cat /tmp/tmp.tBbXR0RlWC + rm /tmp/tmp.H2HyZV1Hp6 /tmp/tmp.tBbXR0RlWC + return 0 + wait_for_running cross-site-sharded-replica-rs0 3 false + local name=cross-site-sharded-replica-rs0 + let last_pod=2 + local check_cluster_readyness=false + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=cross-site-sharded-replica ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod cross-site-sharded-replica-rs0-0 + local pod=cross-site-sharded-replica-rs0-0 + set +o xtrace waiting for pod/cross-site-sharded-replica-rs0-0 to be ready.................OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod cross-site-sharded-replica-rs0-1 + local pod=cross-site-sharded-replica-rs0-1 + set +o xtrace waiting for pod/cross-site-sharded-replica-rs0-1 to be ready...............OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb cross-site-sharded-replica -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.yAk8zh7CtQ +++ mktemp ++ local LAST_ERR=/tmp/tmp.FcJL5vOakc ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb cross-site-sharded-replica -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.yAk8zh7CtQ ++ cat /tmp/tmp.FcJL5vOakc ++ rm /tmp/tmp.yAk8zh7CtQ /tmp/tmp.FcJL5vOakc ++ return 0 + [[ false == \t\r\u\e ]] + wait_pod cross-site-sharded-replica-rs0-2 + local pod=cross-site-sharded-replica-rs0-2 + set +o xtrace waiting for pod/cross-site-sharded-replica-rs0-2 to be ready..........OK ++ kubectl_bin get psmdb cross-site-sharded-replica -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.agYSKwYzdY +++ mktemp ++ local LAST_ERR=/tmp/tmp.TYhmsXLLAX ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb cross-site-sharded-replica -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.agYSKwYzdY ++ cat /tmp/tmp.TYhmsXLLAX ++ rm /tmp/tmp.agYSKwYzdY /tmp/tmp.TYhmsXLLAX ++ return 0 + [[ false == \t\r\u\e ]] ++ kubectl_bin get psmdb cross-site-sharded-replica -o 
'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.5kjJn905qI +++ mktemp ++ local LAST_ERR=/tmp/tmp.rVK4gdSOHK ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb cross-site-sharded-replica -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.5kjJn905qI ++ cat /tmp/tmp.rVK4gdSOHK ++ rm /tmp/tmp.5kjJn905qI /tmp/tmp.rVK4gdSOHK ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ false == \t\r\u\e ]] + wait_for_running cross-site-sharded-replica-rs1 3 false + local name=cross-site-sharded-replica-rs1 + let last_pod=2 + local check_cluster_readyness=false + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs1 + local cluster_name=cross-site-sharded-replica ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod cross-site-sharded-replica-rs1-0 + local pod=cross-site-sharded-replica-rs1-0 + set +o xtrace waiting for pod/cross-site-sharded-replica-rs1-0 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod cross-site-sharded-replica-rs1-1 + local pod=cross-site-sharded-replica-rs1-1 + set +o xtrace waiting for pod/cross-site-sharded-replica-rs1-1 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb cross-site-sharded-replica -o 'jsonpath={.spec.replsets[?(@.name=="rs1")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.DEIsKVlQ9B +++ mktemp ++ local LAST_ERR=/tmp/tmp.sO2Wsqu41h ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb cross-site-sharded-replica -o 'jsonpath={.spec.replsets[?(@.name=="rs1")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.DEIsKVlQ9B ++ cat /tmp/tmp.sO2Wsqu41h ++ rm /tmp/tmp.DEIsKVlQ9B /tmp/tmp.sO2Wsqu41h ++ return 0 + [[ false == \t\r\u\e ]] + wait_pod cross-site-sharded-replica-rs1-2 + local pod=cross-site-sharded-replica-rs1-2 + set +o xtrace waiting for pod/cross-site-sharded-replica-rs1-2 to be ready.OK ++ kubectl_bin get psmdb cross-site-sharded-replica -o 'jsonpath={.spec.replsets[?(@.name=="rs1")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.roYb8vAxRY +++ mktemp ++ local LAST_ERR=/tmp/tmp.2OFfSHDjmP ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb cross-site-sharded-replica -o 'jsonpath={.spec.replsets[?(@.name=="rs1")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.roYb8vAxRY ++ cat /tmp/tmp.2OFfSHDjmP ++ rm /tmp/tmp.roYb8vAxRY /tmp/tmp.2OFfSHDjmP ++ return 0 + [[ false == \t\r\u\e ]] ++ kubectl_bin get psmdb cross-site-sharded-replica -o 'jsonpath={.spec.replsets[?(@.name=="rs1")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.1y92weAyzJ +++ mktemp ++ local LAST_ERR=/tmp/tmp.6dv83dkYKt ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb cross-site-sharded-replica -o 'jsonpath={.spec.replsets[?(@.name=="rs1")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.1y92weAyzJ ++ cat /tmp/tmp.6dv83dkYKt ++ rm /tmp/tmp.1y92weAyzJ /tmp/tmp.6dv83dkYKt ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ false == \t\r\u\e ]] + wait_for_running cross-site-sharded-replica-cfg 3 false + local name=cross-site-sharded-replica-cfg + let last_pod=2 + local 
check_cluster_readyness=false + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=cfg + local cluster_name=cross-site-sharded-replica ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod cross-site-sharded-replica-cfg-0 + local pod=cross-site-sharded-replica-cfg-0 + set +o xtrace waiting for pod/cross-site-sharded-replica-cfg-0 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod cross-site-sharded-replica-cfg-1 + local pod=cross-site-sharded-replica-cfg-1 + set +o xtrace waiting for pod/cross-site-sharded-replica-cfg-1 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb cross-site-sharded-replica -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.LL3dz2Retp +++ mktemp ++ local LAST_ERR=/tmp/tmp.eU3wL3rKix ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb cross-site-sharded-replica -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.LL3dz2Retp ++ cat /tmp/tmp.eU3wL3rKix ++ rm /tmp/tmp.LL3dz2Retp /tmp/tmp.eU3wL3rKix ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod cross-site-sharded-replica-cfg-2 + local pod=cross-site-sharded-replica-cfg-2 + set +o xtrace waiting for pod/cross-site-sharded-replica-cfg-2 to be ready.OK ++ kubectl_bin get psmdb cross-site-sharded-replica -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.cIylJ3OChK +++ mktemp ++ local LAST_ERR=/tmp/tmp.lb1rnFVoHO ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb cross-site-sharded-replica -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.cIylJ3OChK ++ cat /tmp/tmp.lb1rnFVoHO ++ rm /tmp/tmp.cIylJ3OChK /tmp/tmp.lb1rnFVoHO ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb cross-site-sharded-replica -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.UJ3QIhi5WE +++ mktemp ++ local LAST_ERR=/tmp/tmp.Iab55ygk1A ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb cross-site-sharded-replica -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.UJ3QIhi5WE ++ cat /tmp/tmp.Iab55ygk1A ++ rm /tmp/tmp.UJ3QIhi5WE /tmp/tmp.Iab55ygk1A ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ false == \t\r\u\e ]] ++ get_service_ip cross-site-sharded-replica-cfg-0 cfg ++ local service=cross-site-sharded-replica-cfg-0 ++ local server_type=cfg +++ kubectl_bin get psmdb/cross-site-sharded-replica -o 'jsonpath={.spec.replsets[].expose.enabled}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.WIY0vq38jx ++++ mktemp +++ local LAST_ERR=/tmp/tmp.dQEvLpiJwK +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get psmdb/cross-site-sharded-replica -o 'jsonpath={.spec.replsets[].expose.enabled}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.WIY0vq38jx +++ cat /tmp/tmp.dQEvLpiJwK +++ rm /tmp/tmp.WIY0vq38jx /tmp/tmp.dQEvLpiJwK +++ return 0 ++ '[' true '!=' true ']' ++ kubectl_bin get service/cross-site-sharded-replica-cfg-0 -o 'jsonpath={.spec.type}' ++ grep -q NotFound +++ kubectl_bin get 
service/cross-site-sharded-replica-cfg-0 -o 'jsonpath={.spec.type}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.ZFiTtLAB4R ++++ mktemp +++ local LAST_ERR=/tmp/tmp.FqBdYlGvLR +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get service/cross-site-sharded-replica-cfg-0 -o 'jsonpath={.spec.type}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.ZFiTtLAB4R +++ cat /tmp/tmp.FqBdYlGvLR +++ rm /tmp/tmp.ZFiTtLAB4R /tmp/tmp.FqBdYlGvLR +++ return 0 ++ service_type=ClusterIP ++ '[' ClusterIP = ClusterIP ']' ++ kubectl_bin get service/cross-site-sharded-replica-cfg-0 -o 'jsonpath={.spec.clusterIP}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.hmTrOfPH8E +++ mktemp ++ local LAST_ERR=/tmp/tmp.6uYR1NbjBW ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get service/cross-site-sharded-replica-cfg-0 -o 'jsonpath={.spec.clusterIP}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.hmTrOfPH8E ++ cat /tmp/tmp.6uYR1NbjBW ++ rm /tmp/tmp.hmTrOfPH8E /tmp/tmp.6uYR1NbjBW ++ return 0 ++ return + replica_cfg_0_endpoint=34.118.225.150 ++ get_service_ip cross-site-sharded-replica-cfg-1 cfg ++ local service=cross-site-sharded-replica-cfg-1 ++ local server_type=cfg +++ kubectl_bin get psmdb/cross-site-sharded-replica -o 'jsonpath={.spec.replsets[].expose.enabled}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.pJ2IBWRsGv ++++ mktemp +++ local LAST_ERR=/tmp/tmp.TU7BHiO68S +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get psmdb/cross-site-sharded-replica -o 'jsonpath={.spec.replsets[].expose.enabled}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.pJ2IBWRsGv +++ cat /tmp/tmp.TU7BHiO68S +++ rm /tmp/tmp.pJ2IBWRsGv /tmp/tmp.TU7BHiO68S +++ return 0 ++ '[' true '!=' true ']' ++ kubectl_bin get service/cross-site-sharded-replica-cfg-1 -o 'jsonpath={.spec.type}' ++ grep -q NotFound +++ kubectl_bin get service/cross-site-sharded-replica-cfg-1 -o 'jsonpath={.spec.type}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.PtBNpBFEgR ++++ mktemp +++ local LAST_ERR=/tmp/tmp.5qHHEcLpD6 +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get service/cross-site-sharded-replica-cfg-1 -o 'jsonpath={.spec.type}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.PtBNpBFEgR +++ cat /tmp/tmp.5qHHEcLpD6 +++ rm /tmp/tmp.PtBNpBFEgR /tmp/tmp.5qHHEcLpD6 +++ return 0 ++ service_type=ClusterIP ++ '[' ClusterIP = ClusterIP ']' ++ kubectl_bin get service/cross-site-sharded-replica-cfg-1 -o 'jsonpath={.spec.clusterIP}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Gf3lgsPDHc +++ mktemp ++ local LAST_ERR=/tmp/tmp.0IWCQhXFGS ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get service/cross-site-sharded-replica-cfg-1 -o 'jsonpath={.spec.clusterIP}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Gf3lgsPDHc ++ cat /tmp/tmp.0IWCQhXFGS ++ rm /tmp/tmp.Gf3lgsPDHc /tmp/tmp.0IWCQhXFGS ++ return 0 ++ return + replica_cfg_1_endpoint=34.118.238.76 ++ get_service_ip cross-site-sharded-replica-cfg-2 cfg ++ local service=cross-site-sharded-replica-cfg-2 ++ local server_type=cfg +++ kubectl_bin get psmdb/cross-site-sharded-replica -o 'jsonpath={.spec.replsets[].expose.enabled}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.BPQqv1sUI3 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.MKbSbLxs5y +++ 
local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get psmdb/cross-site-sharded-replica -o 'jsonpath={.spec.replsets[].expose.enabled}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.BPQqv1sUI3 +++ cat /tmp/tmp.MKbSbLxs5y +++ rm /tmp/tmp.BPQqv1sUI3 /tmp/tmp.MKbSbLxs5y +++ return 0 ++ '[' true '!=' true ']' ++ kubectl_bin get service/cross-site-sharded-replica-cfg-2 -o 'jsonpath={.spec.type}' ++ grep -q NotFound +++ kubectl_bin get service/cross-site-sharded-replica-cfg-2 -o 'jsonpath={.spec.type}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.Vj8Lb7KkND ++++ mktemp +++ local LAST_ERR=/tmp/tmp.lXZizIkcAA +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get service/cross-site-sharded-replica-cfg-2 -o 'jsonpath={.spec.type}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.Vj8Lb7KkND +++ cat /tmp/tmp.lXZizIkcAA +++ rm /tmp/tmp.Vj8Lb7KkND /tmp/tmp.lXZizIkcAA +++ return 0 ++ service_type=ClusterIP ++ '[' ClusterIP = ClusterIP ']' ++ kubectl_bin get service/cross-site-sharded-replica-cfg-2 -o 'jsonpath={.spec.clusterIP}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.q4fgeTU12K +++ mktemp ++ local LAST_ERR=/tmp/tmp.cqaXq98Udk ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get service/cross-site-sharded-replica-cfg-2 -o 'jsonpath={.spec.clusterIP}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.q4fgeTU12K ++ cat /tmp/tmp.cqaXq98Udk ++ rm /tmp/tmp.q4fgeTU12K /tmp/tmp.cqaXq98Udk ++ return 0 ++ return + replica_cfg_2_endpoint=34.118.230.151 ++ get_service_ip cross-site-sharded-replica-rs0-0 ++ local service=cross-site-sharded-replica-rs0-0 ++ local server_type=rs0 +++ kubectl_bin get psmdb/cross-site-sharded-replica -o 'jsonpath={.spec.replsets[].expose.enabled}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.T4kNrbY94p ++++ mktemp +++ local LAST_ERR=/tmp/tmp.SVddTnnwCb +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get psmdb/cross-site-sharded-replica -o 'jsonpath={.spec.replsets[].expose.enabled}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.T4kNrbY94p +++ cat /tmp/tmp.SVddTnnwCb +++ rm /tmp/tmp.T4kNrbY94p /tmp/tmp.SVddTnnwCb +++ return 0 ++ '[' true '!=' true ']' ++ kubectl_bin get service/cross-site-sharded-replica-rs0-0 -o 'jsonpath={.spec.type}' ++ grep -q NotFound +++ kubectl_bin get service/cross-site-sharded-replica-rs0-0 -o 'jsonpath={.spec.type}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.qvZ1NFIPoI ++++ mktemp +++ local LAST_ERR=/tmp/tmp.5yEeDowjw9 +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get service/cross-site-sharded-replica-rs0-0 -o 'jsonpath={.spec.type}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.qvZ1NFIPoI +++ cat /tmp/tmp.5yEeDowjw9 +++ rm /tmp/tmp.qvZ1NFIPoI /tmp/tmp.5yEeDowjw9 +++ return 0 ++ service_type=ClusterIP ++ '[' ClusterIP = ClusterIP ']' ++ kubectl_bin get service/cross-site-sharded-replica-rs0-0 -o 'jsonpath={.spec.clusterIP}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.v93JoNO2Rc +++ mktemp ++ local LAST_ERR=/tmp/tmp.4lXNEwX1pu ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get service/cross-site-sharded-replica-rs0-0 -o 'jsonpath={.spec.clusterIP}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 
-a -n 1 ']' ++ break ++ cat /tmp/tmp.v93JoNO2Rc ++ cat /tmp/tmp.4lXNEwX1pu ++ rm /tmp/tmp.v93JoNO2Rc /tmp/tmp.4lXNEwX1pu ++ return 0 ++ return + replica_rs0_0_endpoint=34.118.229.83 ++ get_service_ip cross-site-sharded-replica-rs0-1 ++ local service=cross-site-sharded-replica-rs0-1 ++ local server_type=rs0 +++ kubectl_bin get psmdb/cross-site-sharded-replica -o 'jsonpath={.spec.replsets[].expose.enabled}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.zak8ZrTdA5 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.ea7jrIdfmp +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get psmdb/cross-site-sharded-replica -o 'jsonpath={.spec.replsets[].expose.enabled}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.zak8ZrTdA5 +++ cat /tmp/tmp.ea7jrIdfmp +++ rm /tmp/tmp.zak8ZrTdA5 /tmp/tmp.ea7jrIdfmp +++ return 0 ++ '[' true '!=' true ']' ++ grep -q NotFound ++ kubectl_bin get service/cross-site-sharded-replica-rs0-1 -o 'jsonpath={.spec.type}' +++ kubectl_bin get service/cross-site-sharded-replica-rs0-1 -o 'jsonpath={.spec.type}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.FGjfdggtO3 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.haYQqYcMut +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get service/cross-site-sharded-replica-rs0-1 -o 'jsonpath={.spec.type}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.FGjfdggtO3 +++ cat /tmp/tmp.haYQqYcMut +++ rm /tmp/tmp.FGjfdggtO3 /tmp/tmp.haYQqYcMut +++ return 0 ++ service_type=ClusterIP ++ '[' ClusterIP = ClusterIP ']' ++ kubectl_bin get service/cross-site-sharded-replica-rs0-1 -o 'jsonpath={.spec.clusterIP}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.NQSfJ1gjHi +++ mktemp ++ local LAST_ERR=/tmp/tmp.3eEQMxESBg ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get service/cross-site-sharded-replica-rs0-1 -o 'jsonpath={.spec.clusterIP}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.NQSfJ1gjHi ++ cat /tmp/tmp.3eEQMxESBg ++ rm /tmp/tmp.NQSfJ1gjHi /tmp/tmp.3eEQMxESBg ++ return 0 ++ return + replica_rs0_1_endpoint=34.118.238.105 ++ get_service_ip cross-site-sharded-replica-rs0-2 ++ local service=cross-site-sharded-replica-rs0-2 ++ local server_type=rs0 +++ kubectl_bin get psmdb/cross-site-sharded-replica -o 'jsonpath={.spec.replsets[].expose.enabled}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.5BHVZ7ytZU ++++ mktemp +++ local LAST_ERR=/tmp/tmp.VX3DldvDZz +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get psmdb/cross-site-sharded-replica -o 'jsonpath={.spec.replsets[].expose.enabled}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.5BHVZ7ytZU +++ cat /tmp/tmp.VX3DldvDZz +++ rm /tmp/tmp.5BHVZ7ytZU /tmp/tmp.VX3DldvDZz +++ return 0 ++ '[' true '!=' true ']' ++ kubectl_bin get service/cross-site-sharded-replica-rs0-2 -o 'jsonpath={.spec.type}' ++ grep -q NotFound +++ kubectl_bin get service/cross-site-sharded-replica-rs0-2 -o 'jsonpath={.spec.type}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.noyfqp2bsC ++++ mktemp +++ local LAST_ERR=/tmp/tmp.jJGofKnDxq +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get service/cross-site-sharded-replica-rs0-2 -o 'jsonpath={.spec.type}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.noyfqp2bsC +++ cat /tmp/tmp.jJGofKnDxq +++ rm 
/tmp/tmp.noyfqp2bsC /tmp/tmp.jJGofKnDxq +++ return 0 ++ service_type=ClusterIP ++ '[' ClusterIP = ClusterIP ']' ++ kubectl_bin get service/cross-site-sharded-replica-rs0-2 -o 'jsonpath={.spec.clusterIP}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.v2WPX9NRpt +++ mktemp ++ local LAST_ERR=/tmp/tmp.yVZlCqrPU9 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get service/cross-site-sharded-replica-rs0-2 -o 'jsonpath={.spec.clusterIP}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.v2WPX9NRpt ++ cat /tmp/tmp.yVZlCqrPU9 ++ rm /tmp/tmp.v2WPX9NRpt /tmp/tmp.yVZlCqrPU9 ++ return 0 ++ return + replica_rs0_2_endpoint=34.118.227.124 ++ get_service_ip cross-site-sharded-replica-rs1-0 rs1 ++ local service=cross-site-sharded-replica-rs1-0 ++ local server_type=rs1 +++ kubectl_bin get psmdb/cross-site-sharded-replica -o 'jsonpath={.spec.replsets[].expose.enabled}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.4VkzT4kNcu ++++ mktemp +++ local LAST_ERR=/tmp/tmp.hHCHuupMpX +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get psmdb/cross-site-sharded-replica -o 'jsonpath={.spec.replsets[].expose.enabled}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.4VkzT4kNcu +++ cat /tmp/tmp.hHCHuupMpX +++ rm /tmp/tmp.4VkzT4kNcu /tmp/tmp.hHCHuupMpX +++ return 0 ++ '[' true '!=' true ']' ++ kubectl_bin get service/cross-site-sharded-replica-rs1-0 -o 'jsonpath={.spec.type}' ++ grep -q NotFound +++ kubectl_bin get service/cross-site-sharded-replica-rs1-0 -o 'jsonpath={.spec.type}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.4nYjmUDkMd ++++ mktemp +++ local LAST_ERR=/tmp/tmp.bQOc5j1wDw +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get service/cross-site-sharded-replica-rs1-0 -o 'jsonpath={.spec.type}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.4nYjmUDkMd +++ cat /tmp/tmp.bQOc5j1wDw +++ rm /tmp/tmp.4nYjmUDkMd /tmp/tmp.bQOc5j1wDw +++ return 0 ++ service_type=ClusterIP ++ '[' ClusterIP = ClusterIP ']' ++ kubectl_bin get service/cross-site-sharded-replica-rs1-0 -o 'jsonpath={.spec.clusterIP}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Jpz10F2cM7 +++ mktemp ++ local LAST_ERR=/tmp/tmp.Y9xEoJvPEe ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get service/cross-site-sharded-replica-rs1-0 -o 'jsonpath={.spec.clusterIP}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Jpz10F2cM7 ++ cat /tmp/tmp.Y9xEoJvPEe ++ rm /tmp/tmp.Jpz10F2cM7 /tmp/tmp.Y9xEoJvPEe ++ return 0 ++ return + replica_rs1_0_endpoint=34.118.227.231 ++ get_service_ip cross-site-sharded-replica-rs1-1 rs1 ++ local service=cross-site-sharded-replica-rs1-1 ++ local server_type=rs1 +++ kubectl_bin get psmdb/cross-site-sharded-replica -o 'jsonpath={.spec.replsets[].expose.enabled}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.3Fkda7wQCw ++++ mktemp +++ local LAST_ERR=/tmp/tmp.GoRHypbOcT +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get psmdb/cross-site-sharded-replica -o 'jsonpath={.spec.replsets[].expose.enabled}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.3Fkda7wQCw +++ cat /tmp/tmp.GoRHypbOcT +++ rm /tmp/tmp.3Fkda7wQCw /tmp/tmp.GoRHypbOcT +++ return 0 ++ '[' true '!=' true ']' ++ kubectl_bin get service/cross-site-sharded-replica-rs1-1 -o 
'jsonpath={.spec.type}' ++ grep -q NotFound +++ kubectl_bin get service/cross-site-sharded-replica-rs1-1 -o 'jsonpath={.spec.type}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.GgCNjdiYmz ++++ mktemp +++ local LAST_ERR=/tmp/tmp.8qELxbLUY0 +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get service/cross-site-sharded-replica-rs1-1 -o 'jsonpath={.spec.type}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.GgCNjdiYmz +++ cat /tmp/tmp.8qELxbLUY0 +++ rm /tmp/tmp.GgCNjdiYmz /tmp/tmp.8qELxbLUY0 +++ return 0 ++ service_type=ClusterIP ++ '[' ClusterIP = ClusterIP ']' ++ kubectl_bin get service/cross-site-sharded-replica-rs1-1 -o 'jsonpath={.spec.clusterIP}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.24Yc8AEYXQ +++ mktemp ++ local LAST_ERR=/tmp/tmp.M378WBBSYp ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get service/cross-site-sharded-replica-rs1-1 -o 'jsonpath={.spec.clusterIP}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.24Yc8AEYXQ ++ cat /tmp/tmp.M378WBBSYp ++ rm /tmp/tmp.24Yc8AEYXQ /tmp/tmp.M378WBBSYp ++ return 0 ++ return + replica_rs1_1_endpoint=34.118.225.53 ++ get_service_ip cross-site-sharded-replica-rs1-2 rs1 ++ local service=cross-site-sharded-replica-rs1-2 ++ local server_type=rs1 +++ kubectl_bin get psmdb/cross-site-sharded-replica -o 'jsonpath={.spec.replsets[].expose.enabled}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.ODmeSl087S ++++ mktemp +++ local LAST_ERR=/tmp/tmp.dfnAlY01rb +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get psmdb/cross-site-sharded-replica -o 'jsonpath={.spec.replsets[].expose.enabled}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.ODmeSl087S +++ cat /tmp/tmp.dfnAlY01rb +++ rm /tmp/tmp.ODmeSl087S /tmp/tmp.dfnAlY01rb +++ return 0 ++ '[' true '!=' true ']' ++ kubectl_bin get service/cross-site-sharded-replica-rs1-2 -o 'jsonpath={.spec.type}' ++ grep -q NotFound +++ kubectl_bin get service/cross-site-sharded-replica-rs1-2 -o 'jsonpath={.spec.type}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.3BRSV0Ck3t ++++ mktemp +++ local LAST_ERR=/tmp/tmp.QPf6JtNSiK +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get service/cross-site-sharded-replica-rs1-2 -o 'jsonpath={.spec.type}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.3BRSV0Ck3t +++ cat /tmp/tmp.QPf6JtNSiK +++ rm /tmp/tmp.3BRSV0Ck3t /tmp/tmp.QPf6JtNSiK +++ return 0 ++ service_type=ClusterIP ++ '[' ClusterIP = ClusterIP ']' ++ kubectl_bin get service/cross-site-sharded-replica-rs1-2 -o 'jsonpath={.spec.clusterIP}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.htd164IfQR +++ mktemp ++ local LAST_ERR=/tmp/tmp.r57fzlhxTD ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get service/cross-site-sharded-replica-rs1-2 -o 'jsonpath={.spec.clusterIP}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.htd164IfQR ++ cat /tmp/tmp.r57fzlhxTD ++ rm /tmp/tmp.htd164IfQR /tmp/tmp.r57fzlhxTD ++ return 0 ++ return + replica_rs1_2_endpoint=34.118.234.62 ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.Gg7WP3p1lx +++ mktemp ++ local LAST_ERR=/tmp/tmp.knI4ANVlGn ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl config current-context ++ exit_status=0 
++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Gg7WP3p1lx ++ cat /tmp/tmp.knI4ANVlGn ++ rm /tmp/tmp.Gg7WP3p1lx /tmp/tmp.knI4ANVlGn ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-2135-b9c4516c-6-cluster3 --namespace=cross-site-sharded-19485 ++ mktemp + local LAST_OUT=/tmp/tmp.LHzxmV8srr ++ mktemp + local LAST_ERR=/tmp/tmp.rbABTkf2e7 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-2135-b9c4516c-6-cluster3 --namespace=cross-site-sharded-19485 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.LHzxmV8srr Context "gke_cloud-dev-112233_us-central1-a_jen-psmdb-2135-b9c4516c-6-cluster3" modified. + cat /tmp/tmp.rbABTkf2e7 + rm /tmp/tmp.LHzxmV8srr /tmp/tmp.rbABTkf2e7 + return 0 + kubectl_bin patch psmdb cross-site-sharded-main --type=merge --patch '{ "spec": {"replsets":[ {"affinity":{"antiAffinityTopologyKey": "none"},"arbiter":{"affinity":{"antiAffinityTopologyKey": "none"},"enabled":false,"size":1},"expose":{"enabled":true,"type":"ClusterIp"},"externalNodes":[{"host":"34.118.229.83","priority":0,"votes":0},{"host":"34.118.238.105","port":27017,"priority":1,"votes":1},{"host":"34.118.227.124", "port":27017,"priority":1,"votes":1}],"name":"rs0","nonvoting":{"affinity":{"antiAffinityTopologyKey":"none"},"enabled":false,"podDisruptionBudget":{"maxUnavailable":1},"resources":{"limits":{"cpu":"300m","memory":"0.5G"},"requests":{"cpu":"300m","memory":"0.5G"}},"size":3,"volumeSpec":{"persistentVolumeClaim":{"resources":{"requests":{"storage":"1Gi"}}}}},"podDisruptionBudget":{"maxUnavailable":1},"resources":{"limits":{"cpu":"300m","memory":"0.5G"},"requests":{"cpu":"300m","memory":"0.5G"}},"size":3,"volumeSpec":{"persistentVolumeClaim":{"resources":{"requests":{"storage":"3Gi"}}}}}, {"affinity":{"antiAffinityTopologyKey": "none"},"arbiter":{"affinity":{"antiAffinityTopologyKey": "none"},"enabled":false,"size":1},"expose":{"enabled":true,"type":"ClusterIp"},"externalNodes":[{"host":"34.118.227.231","priority":0,"votes":0},{"host":"34.118.225.53","port":27017,"priority":1,"votes":1},{"host":"34.118.234.62", "port":27017,"priority":1,"votes":1}],"name":"rs1","nonvoting":{"affinity":{"antiAffinityTopologyKey":"none"},"enabled":false,"podDisruptionBudget":{"maxUnavailable":1},"resources":{"limits":{"cpu":"300m","memory":"0.5G"},"requests":{"cpu":"300m","memory":"0.5G"}},"size":3,"volumeSpec":{"persistentVolumeClaim":{"resources":{"requests":{"storage":"1Gi"}}}}},"podDisruptionBudget":{"maxUnavailable":1},"resources":{"limits":{"cpu":"300m","memory":"0.5G"},"requests":{"cpu":"300m","memory":"0.5G"}},"size":3,"volumeSpec":{"persistentVolumeClaim":{"resources":{"requests":{"storage":"3Gi"}}}}} ], "sharding":{"configsvrReplSet":{ "externalNodes": [{"host":"34.118.225.150","priority":1,"votes":1 },{"host":"34.118.238.76", "priority":1,"votes":1},{"host":"34.118.230.151","priority":0,"votes":0}]}} } }' ++ mktemp + local LAST_OUT=/tmp/tmp.vbfZJr0Ih3 ++ mktemp + local LAST_ERR=/tmp/tmp.8C5xthNVQx + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl patch psmdb cross-site-sharded-main --type=merge --patch '{ "spec": {"replsets":[ {"affinity":{"antiAffinityTopologyKey": "none"},"arbiter":{"affinity":{"antiAffinityTopologyKey": 
"none"},"enabled":false,"size":1},"expose":{"enabled":true,"type":"ClusterIp"},"externalNodes":[{"host":"34.118.229.83","priority":0,"votes":0},{"host":"34.118.238.105","port":27017,"priority":1,"votes":1},{"host":"34.118.227.124", "port":27017,"priority":1,"votes":1}],"name":"rs0","nonvoting":{"affinity":{"antiAffinityTopologyKey":"none"},"enabled":false,"podDisruptionBudget":{"maxUnavailable":1},"resources":{"limits":{"cpu":"300m","memory":"0.5G"},"requests":{"cpu":"300m","memory":"0.5G"}},"size":3,"volumeSpec":{"persistentVolumeClaim":{"resources":{"requests":{"storage":"1Gi"}}}}},"podDisruptionBudget":{"maxUnavailable":1},"resources":{"limits":{"cpu":"300m","memory":"0.5G"},"requests":{"cpu":"300m","memory":"0.5G"}},"size":3,"volumeSpec":{"persistentVolumeClaim":{"resources":{"requests":{"storage":"3Gi"}}}}}, {"affinity":{"antiAffinityTopologyKey": "none"},"arbiter":{"affinity":{"antiAffinityTopologyKey": "none"},"enabled":false,"size":1},"expose":{"enabled":true,"type":"ClusterIp"},"externalNodes":[{"host":"34.118.227.231","priority":0,"votes":0},{"host":"34.118.225.53","port":27017,"priority":1,"votes":1},{"host":"34.118.234.62", "port":27017,"priority":1,"votes":1}],"name":"rs1","nonvoting":{"affinity":{"antiAffinityTopologyKey":"none"},"enabled":false,"podDisruptionBudget":{"maxUnavailable":1},"resources":{"limits":{"cpu":"300m","memory":"0.5G"},"requests":{"cpu":"300m","memory":"0.5G"}},"size":3,"volumeSpec":{"persistentVolumeClaim":{"resources":{"requests":{"storage":"1Gi"}}}}},"podDisruptionBudget":{"maxUnavailable":1},"resources":{"limits":{"cpu":"300m","memory":"0.5G"},"requests":{"cpu":"300m","memory":"0.5G"}},"size":3,"volumeSpec":{"persistentVolumeClaim":{"resources":{"requests":{"storage":"3Gi"}}}}} ], "sharding":{"configsvrReplSet":{ "externalNodes": [{"host":"34.118.225.150","priority":1,"votes":1 },{"host":"34.118.238.76", "priority":1,"votes":1},{"host":"34.118.230.151","priority":0,"votes":0}]}} } }' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.vbfZJr0Ih3 perconaservermongodb.psmdb.percona.com/cross-site-sharded-main patched + cat /tmp/tmp.8C5xthNVQx + rm /tmp/tmp.vbfZJr0Ih3 /tmp/tmp.8C5xthNVQx + return 0 + wait_for_members 34.118.225.150 cfg 6 + local endpoint=34.118.225.150 + local rsName=cfg + local target_count=6 + local nodes_count=0 + [[ 0 == 6 ]] ++ run_mongos 'rs.conf().members.length' clusterAdmin:clusterAdmin123456@34.118.225.150 mongodb :27017 ++ egrep -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|bye' ++ local 'command=rs.conf().members.length' ++ local uri=clusterAdmin:clusterAdmin123456@34.118.225.150 ++ local driver=mongodb ++ local suffix=:27017 ++ /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ local mongo_flag= ++ local port=27017 ++ local mongo_bin=mongo +++ echo :27017 +++ awk -F: '{print $2}' egrep: warning: egrep is obsolescent; using grep -E ++ suffix_port=27017 ++ [[ -z 27017 ]] +++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.sh0tv8lZXz ++++ mktemp +++ local LAST_ERR=/tmp/tmp.5ubCess6l2 +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.sh0tv8lZXz +++ 
cat /tmp/tmp.5ubCess6l2 +++ rm /tmp/tmp.sh0tv8lZXz /tmp/tmp.5ubCess6l2 +++ return 0 ++ local client_container=psmdb-client-696897d69b-8t86g ++ kubectl_bin exec psmdb-client-696897d69b-8t86g -- bash -c 'printf '\''rs.conf().members.length\n'\'' | mongo mongodb://clusterAdmin:clusterAdmin123456@34.118.225.150:27017/admin ' +++ mktemp ++ local LAST_OUT=/tmp/tmp.03jonRvvWA +++ mktemp ++ local LAST_ERR=/tmp/tmp.QesWBaaIvS ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl exec psmdb-client-696897d69b-8t86g -- bash -c 'printf '\''rs.conf().members.length\n'\'' | mongo mongodb://clusterAdmin:clusterAdmin123456@34.118.225.150:27017/admin ' ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.03jonRvvWA ++ cat /tmp/tmp.QesWBaaIvS command terminated with exit code 1 ++ sleep 0 ++ for i in $(seq 0 2) ++ set +e ++ kubectl exec psmdb-client-696897d69b-8t86g -- bash -c 'printf '\''rs.conf().members.length\n'\'' | mongo mongodb://clusterAdmin:clusterAdmin123456@34.118.225.150:27017/admin ' ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.03jonRvvWA ++ cat /tmp/tmp.QesWBaaIvS command terminated with exit code 1 ++ sleep 4 ++ for i in $(seq 0 2) ++ set +e ++ kubectl exec psmdb-client-696897d69b-8t86g -- bash -c 'printf '\''rs.conf().members.length\n'\'' | mongo mongodb://clusterAdmin:clusterAdmin123456@34.118.225.150:27017/admin ' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.03jonRvvWA ++ cat /tmp/tmp.QesWBaaIvS ++ rm /tmp/tmp.03jonRvvWA /tmp/tmp.QesWBaaIvS ++ return 0 + nodes_count='Error: Authentication failed. : connect@src/mongo/shell/mongo.js:374:17 @(connect):2:6 exception: connect failed exiting with code 1 Error: Authentication failed. : connect@src/mongo/shell/mongo.js:374:17 @(connect):2:6 exception: connect failed exiting with code 1 4' + echo -n 'waiting for all members to be configured in cfg' waiting for all members to be configured in cfg+ let retry+=1 + '[' 1 -ge 15 ']' + echo . . + sleep 10 + [[ Error: Authentication failed. : connect@src/mongo/shell/mongo.js:374:17 @(connect):2:6 exception: connect failed exiting with code 1 Error: Authentication failed. 
: connect@src/mongo/shell/mongo.js:374:17 @(connect):2:6 exception: connect failed exiting with code 1 4 == 6 ]] ++ run_mongos 'rs.conf().members.length' clusterAdmin:clusterAdmin123456@34.118.225.150 mongodb :27017 ++ local 'command=rs.conf().members.length' ++ local uri=clusterAdmin:clusterAdmin123456@34.118.225.150 ++ local driver=mongodb ++ local suffix=:27017 ++ local mongo_flag= ++ local port=27017 ++ local mongo_bin=mongo ++ egrep -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|bye' ++ /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' +++ echo :27017 +++ awk -F: '{print $2}' egrep: warning: egrep is obsolescent; using grep -E ++ suffix_port=27017 ++ [[ -z 27017 ]] +++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.O1X27FE672 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.pRY8FZ2Vqa +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.O1X27FE672 +++ cat /tmp/tmp.pRY8FZ2Vqa +++ rm /tmp/tmp.O1X27FE672 /tmp/tmp.pRY8FZ2Vqa +++ return 0 ++ local client_container=psmdb-client-696897d69b-8t86g ++ kubectl_bin exec psmdb-client-696897d69b-8t86g -- bash -c 'printf '\''rs.conf().members.length\n'\'' | mongo mongodb://clusterAdmin:clusterAdmin123456@34.118.225.150:27017/admin ' +++ mktemp ++ local LAST_OUT=/tmp/tmp.dggL5rw6FD +++ mktemp ++ local LAST_ERR=/tmp/tmp.BiDw40j8jU ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl exec psmdb-client-696897d69b-8t86g -- bash -c 'printf '\''rs.conf().members.length\n'\'' | mongo mongodb://clusterAdmin:clusterAdmin123456@34.118.225.150:27017/admin ' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.dggL5rw6FD ++ cat /tmp/tmp.BiDw40j8jU ++ rm /tmp/tmp.dggL5rw6FD /tmp/tmp.BiDw40j8jU ++ return 0 + nodes_count=5 + echo -n 'waiting for all members to be configured in cfg' waiting for all members to be configured in cfg+ let retry+=1 + '[' 2 -ge 15 ']' + echo . . 
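
Two things are worth noting in that first probe: the client pod's mongo connection hit a transient "Authentication failed" on the first two exec attempts (the wrapper retried and the third attempt succeeded), so nodes_count came back as the two error dumps concatenated with a count, and the [[ ... == 6 ]] string compare simply treated it as "not yet there". From the second probe onward the cfg replset reports a clean 5 of the 6 expected members. The surrounding polling loop, reconstructed from the trace — the 15-retry cap and 10-second sleep are visible above; the failure path at the cap is never reached in this run, so its body here is an assumption:

    wait_for_members() {
        local endpoint=$1 rsName=$2 target_count=$3
        local nodes_count=0 retry=0
        until [[ ${nodes_count} == "${target_count}" ]]; do
            nodes_count=$(run_mongos 'rs.conf().members.length' \
                "clusterAdmin:clusterAdmin123456@${endpoint}" mongodb :27017)
            echo -n "waiting for all members to be configured in ${rsName}"
            let retry+=1
            # assumption: the helper bails out once retry reaches 15
            [ "${retry}" -ge 15 ] && return 1
            echo .
            sleep 10
        done
    }
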
+ sleep 10 + [[ 5 == 6 ]] ++ run_mongos 'rs.conf().members.length' clusterAdmin:clusterAdmin123456@34.118.225.150 mongodb :27017 ++ local 'command=rs.conf().members.length' ++ local uri=clusterAdmin:clusterAdmin123456@34.118.225.150 ++ local driver=mongodb ++ local suffix=:27017 ++ local mongo_flag= ++ local port=27017 ++ local mongo_bin=mongo ++ egrep -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|bye' ++ /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' +++ echo :27017 +++ awk -F: '{print $2}' egrep: warning: egrep is obsolescent; using grep -E ++ suffix_port=27017 ++ [[ -z 27017 ]] +++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.kSf3P9VXzK ++++ mktemp +++ local LAST_ERR=/tmp/tmp.ShF36f5ntY +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.kSf3P9VXzK +++ cat /tmp/tmp.ShF36f5ntY +++ rm /tmp/tmp.kSf3P9VXzK /tmp/tmp.ShF36f5ntY +++ return 0 ++ local client_container=psmdb-client-696897d69b-8t86g ++ kubectl_bin exec psmdb-client-696897d69b-8t86g -- bash -c 'printf '\''rs.conf().members.length\n'\'' | mongo mongodb://clusterAdmin:clusterAdmin123456@34.118.225.150:27017/admin ' +++ mktemp ++ local LAST_OUT=/tmp/tmp.LFt5fzpjCn +++ mktemp ++ local LAST_ERR=/tmp/tmp.v4BqLHGwZY ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl exec psmdb-client-696897d69b-8t86g -- bash -c 'printf '\''rs.conf().members.length\n'\'' | mongo mongodb://clusterAdmin:clusterAdmin123456@34.118.225.150:27017/admin ' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.LFt5fzpjCn ++ cat /tmp/tmp.v4BqLHGwZY ++ rm /tmp/tmp.LFt5fzpjCn /tmp/tmp.v4BqLHGwZY ++ return 0 + nodes_count=5 + echo -n 'waiting for all members to be configured in cfg' waiting for all members to be configured in cfg+ let retry+=1 + '[' 3 -ge 15 ']' + echo . . 
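
For reference, the endpoint being polled here (34.118.225.150) is one of the ClusterIP addresses that get_service_ip resolved earlier in this section, before they were patched into the main cluster as externalNodes. Reconstructed from those probes, ClusterIP branch only, since that is the only service type this run encounters — ${cluster} stands for the psmdb resource name (cross-site-sharded-replica above) and is an assumption about the helper's scope:

    get_service_ip() {
        local service=$1
        local server_type=${2:-rs0}
        # refuse to hand out an address while the replset is not exposed
        if [ "$(kubectl_bin get "psmdb/${cluster}" \
            -o 'jsonpath={.spec.replsets[].expose.enabled}')" != "true" ]; then
            return 1
        fi
        local service_type
        service_type=$(kubectl_bin get "service/${service}" -o 'jsonpath={.spec.type}')
        if [ "${service_type}" = "ClusterIP" ]; then
            kubectl_bin get "service/${service}" -o 'jsonpath={.spec.clusterIP}'
        fi
    }
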
+ sleep 10 + [[ 5 == 6 ]] ++ run_mongos 'rs.conf().members.length' clusterAdmin:clusterAdmin123456@34.118.225.150 mongodb :27017 ++ local 'command=rs.conf().members.length' ++ local uri=clusterAdmin:clusterAdmin123456@34.118.225.150 ++ local driver=mongodb ++ egrep -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|bye' ++ local suffix=:27017 ++ local mongo_flag= ++ local port=27017 ++ local mongo_bin=mongo ++ /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' +++ echo :27017 +++ awk -F: '{print $2}' egrep: warning: egrep is obsolescent; using grep -E ++ suffix_port=27017 ++ [[ -z 27017 ]] +++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.PJ0mlLKx3k ++++ mktemp +++ local LAST_ERR=/tmp/tmp.yEJGAfknsp +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.PJ0mlLKx3k +++ cat /tmp/tmp.yEJGAfknsp +++ rm /tmp/tmp.PJ0mlLKx3k /tmp/tmp.yEJGAfknsp +++ return 0 ++ local client_container=psmdb-client-696897d69b-8t86g ++ kubectl_bin exec psmdb-client-696897d69b-8t86g -- bash -c 'printf '\''rs.conf().members.length\n'\'' | mongo mongodb://clusterAdmin:clusterAdmin123456@34.118.225.150:27017/admin ' +++ mktemp ++ local LAST_OUT=/tmp/tmp.CH3TnMrCJ0 +++ mktemp ++ local LAST_ERR=/tmp/tmp.ZEe2N0EZAA ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl exec psmdb-client-696897d69b-8t86g -- bash -c 'printf '\''rs.conf().members.length\n'\'' | mongo mongodb://clusterAdmin:clusterAdmin123456@34.118.225.150:27017/admin ' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.CH3TnMrCJ0 ++ cat /tmp/tmp.ZEe2N0EZAA ++ rm /tmp/tmp.CH3TnMrCJ0 /tmp/tmp.ZEe2N0EZAA ++ return 0 + nodes_count=5 + echo -n 'waiting for all members to be configured in cfg' waiting for all members to be configured in cfg+ let retry+=1 + '[' 4 -ge 15 ']' + echo . . 
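
Each probe goes through run_mongos, which resolves the psmdb-client pod by label, pipes the command into the legacy mongo shell inside that pod, and then scrubs the output on the host side: egrep -v drops shell chatter (network log lines, banners, "Implicit session:", "bye", and so on) and sed masks ObjectIds and the namespace-specific svc hostnames so results stay stable across runs. A condensed sketch, with the exclusion list abbreviated from the one visible in the trace and the command quoting simplified:

    run_mongos() {
        local command=$1 uri=$2 driver=${3:-mongodb} suffix=${4:-:27017}
        local client_container
        client_container=$(kubectl_bin get pods --selector=name=psmdb-client \
            -o 'jsonpath={.items[].metadata.name}')
        kubectl_bin exec "${client_container}" -- bash -c \
            "printf '%s\n' '${command}' | mongo ${driver}://${uri}${suffix}/admin" \
            | grep -Ev 'I NETWORK|W NETWORK|Implicit session:|connecting to:|bye' \
            | sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/'
    }
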
+ sleep 10 + [[ 5 == 6 ]] ++ run_mongos 'rs.conf().members.length' clusterAdmin:clusterAdmin123456@34.118.225.150 mongodb :27017 ++ egrep -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|bye' ++ local 'command=rs.conf().members.length' ++ local uri=clusterAdmin:clusterAdmin123456@34.118.225.150 ++ local driver=mongodb ++ local suffix=:27017 ++ local mongo_flag= ++ local port=27017 ++ local mongo_bin=mongo ++ /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' +++ echo :27017 +++ awk -F: '{print $2}' egrep: warning: egrep is obsolescent; using grep -E ++ suffix_port=27017 ++ [[ -z 27017 ]] +++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.Enwni8LqII ++++ mktemp +++ local LAST_ERR=/tmp/tmp.S92CZjyLkf +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.Enwni8LqII +++ cat /tmp/tmp.S92CZjyLkf +++ rm /tmp/tmp.Enwni8LqII /tmp/tmp.S92CZjyLkf +++ return 0 ++ local client_container=psmdb-client-696897d69b-8t86g ++ kubectl_bin exec psmdb-client-696897d69b-8t86g -- bash -c 'printf '\''rs.conf().members.length\n'\'' | mongo mongodb://clusterAdmin:clusterAdmin123456@34.118.225.150:27017/admin ' +++ mktemp ++ local LAST_OUT=/tmp/tmp.N0VuGLJuxC +++ mktemp ++ local LAST_ERR=/tmp/tmp.wqnN4XzVt9 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl exec psmdb-client-696897d69b-8t86g -- bash -c 'printf '\''rs.conf().members.length\n'\'' | mongo mongodb://clusterAdmin:clusterAdmin123456@34.118.225.150:27017/admin ' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.N0VuGLJuxC ++ cat /tmp/tmp.wqnN4XzVt9 ++ rm /tmp/tmp.N0VuGLJuxC /tmp/tmp.wqnN4XzVt9 ++ return 0 + nodes_count=5 + echo -n 'waiting for all members to be configured in cfg' waiting for all members to be configured in cfg+ let retry+=1 + '[' 5 -ge 15 ']' + echo . . 
+ sleep 10 + [[ 5 == 6 ]] ++ run_mongos 'rs.conf().members.length' clusterAdmin:clusterAdmin123456@34.118.225.150 mongodb :27017 ++ local 'command=rs.conf().members.length' ++ local uri=clusterAdmin:clusterAdmin123456@34.118.225.150 ++ local driver=mongodb ++ local suffix=:27017 ++ local mongo_flag= ++ local port=27017 ++ local mongo_bin=mongo ++ egrep -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|bye' ++ /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' +++ echo :27017 +++ awk -F: '{print $2}' egrep: warning: egrep is obsolescent; using grep -E ++ suffix_port=27017 ++ [[ -z 27017 ]] +++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.5ebrRKuZaU ++++ mktemp +++ local LAST_ERR=/tmp/tmp.5Ai1gvsTIM +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.5ebrRKuZaU +++ cat /tmp/tmp.5Ai1gvsTIM +++ rm /tmp/tmp.5ebrRKuZaU /tmp/tmp.5Ai1gvsTIM +++ return 0 ++ local client_container=psmdb-client-696897d69b-8t86g ++ kubectl_bin exec psmdb-client-696897d69b-8t86g -- bash -c 'printf '\''rs.conf().members.length\n'\'' | mongo mongodb://clusterAdmin:clusterAdmin123456@34.118.225.150:27017/admin ' +++ mktemp ++ local LAST_OUT=/tmp/tmp.GoXpD9nqtA +++ mktemp ++ local LAST_ERR=/tmp/tmp.K8yposWn7g ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl exec psmdb-client-696897d69b-8t86g -- bash -c 'printf '\''rs.conf().members.length\n'\'' | mongo mongodb://clusterAdmin:clusterAdmin123456@34.118.225.150:27017/admin ' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.GoXpD9nqtA ++ cat /tmp/tmp.K8yposWn7g ++ rm /tmp/tmp.GoXpD9nqtA /tmp/tmp.K8yposWn7g ++ return 0 + nodes_count=5 + echo -n 'waiting for all members to be configured in cfg' waiting for all members to be configured in cfg+ let retry+=1 + '[' 6 -ge 15 ']' + echo . . 
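
The recurring "egrep: warning: egrep is obsolescent; using grep -E" lines come from GNU grep 3.8 and later, where the egrep and fgrep wrappers are deprecated; they are harmless here, since the warning goes to stderr and the filter still runs as grep -E. Silencing them for good is a one-word change in the helper (abbreviated pattern; the full exclusion list is the one shown in the trace):

    # before: warns once per invocation on GNU grep >= 3.8
    egrep -v 'I NETWORK|W NETWORK|Implicit session:|bye'
    # after: same semantics, no warning
    grep -E -v 'I NETWORK|W NETWORK|Implicit session:|bye'
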
+ sleep 10 + [[ 5 == 6 ]] ++ run_mongos 'rs.conf().members.length' clusterAdmin:clusterAdmin123456@34.118.225.150 mongodb :27017 ++ local 'command=rs.conf().members.length' ++ local uri=clusterAdmin:clusterAdmin123456@34.118.225.150 ++ local driver=mongodb ++ local suffix=:27017 ++ local mongo_flag= ++ local port=27017 ++ local mongo_bin=mongo ++ /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ egrep -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|bye' +++ echo :27017 +++ awk -F: '{print $2}' egrep: warning: egrep is obsolescent; using grep -E ++ suffix_port=27017 ++ [[ -z 27017 ]] +++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.JuojgrYFsX ++++ mktemp +++ local LAST_ERR=/tmp/tmp.y1WlrlMFVo +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.JuojgrYFsX +++ cat /tmp/tmp.y1WlrlMFVo +++ rm /tmp/tmp.JuojgrYFsX /tmp/tmp.y1WlrlMFVo +++ return 0 ++ local client_container=psmdb-client-696897d69b-8t86g ++ kubectl_bin exec psmdb-client-696897d69b-8t86g -- bash -c 'printf '\''rs.conf().members.length\n'\'' | mongo mongodb://clusterAdmin:clusterAdmin123456@34.118.225.150:27017/admin ' +++ mktemp ++ local LAST_OUT=/tmp/tmp.DtgHJYBdQ2 +++ mktemp ++ local LAST_ERR=/tmp/tmp.0IrRzNNLrf ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl exec psmdb-client-696897d69b-8t86g -- bash -c 'printf '\''rs.conf().members.length\n'\'' | mongo mongodb://clusterAdmin:clusterAdmin123456@34.118.225.150:27017/admin ' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.DtgHJYBdQ2 ++ cat /tmp/tmp.0IrRzNNLrf ++ rm /tmp/tmp.DtgHJYBdQ2 /tmp/tmp.0IrRzNNLrf ++ return 0 + nodes_count=5 + echo -n 'waiting for all members to be configured in cfg' waiting for all members to be configured in cfg+ let retry+=1 + '[' 7 -ge 15 ']' + echo . . 
+ sleep 10 + [[ 5 == 6 ]] ++ run_mongos 'rs.conf().members.length' clusterAdmin:clusterAdmin123456@34.118.225.150 mongodb :27017 ++ local 'command=rs.conf().members.length' ++ local uri=clusterAdmin:clusterAdmin123456@34.118.225.150 ++ local driver=mongodb ++ local suffix=:27017 ++ local mongo_flag= ++ local port=27017 ++ local mongo_bin=mongo ++ egrep -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|bye' ++ /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' +++ echo :27017 +++ awk -F: '{print $2}' egrep: warning: egrep is obsolescent; using grep -E ++ suffix_port=27017 ++ [[ -z 27017 ]] +++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.F7iRvVAB8l ++++ mktemp +++ local LAST_ERR=/tmp/tmp.zaHsZWudj6 +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.F7iRvVAB8l +++ cat /tmp/tmp.zaHsZWudj6 +++ rm /tmp/tmp.F7iRvVAB8l /tmp/tmp.zaHsZWudj6 +++ return 0 ++ local client_container=psmdb-client-696897d69b-8t86g ++ kubectl_bin exec psmdb-client-696897d69b-8t86g -- bash -c 'printf '\''rs.conf().members.length\n'\'' | mongo mongodb://clusterAdmin:clusterAdmin123456@34.118.225.150:27017/admin ' +++ mktemp ++ local LAST_OUT=/tmp/tmp.YqynQkRaB4 +++ mktemp ++ local LAST_ERR=/tmp/tmp.8rObfmh7NW ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl exec psmdb-client-696897d69b-8t86g -- bash -c 'printf '\''rs.conf().members.length\n'\'' | mongo mongodb://clusterAdmin:clusterAdmin123456@34.118.225.150:27017/admin ' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.YqynQkRaB4 ++ cat /tmp/tmp.8rObfmh7NW ++ rm /tmp/tmp.YqynQkRaB4 /tmp/tmp.8rObfmh7NW ++ return 0 + nodes_count=5 + echo -n 'waiting for all members to be configured in cfg' waiting for all members to be configured in cfg+ let retry+=1 + '[' 8 -ge 15 ']' + echo . . 
+ sleep 10 + [[ 5 == 6 ]] ++ run_mongos 'rs.conf().members.length' clusterAdmin:clusterAdmin123456@34.118.225.150 mongodb :27017 ++ local 'command=rs.conf().members.length' ++ egrep -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|bye' ++ local uri=clusterAdmin:clusterAdmin123456@34.118.225.150 ++ local driver=mongodb ++ local suffix=:27017 ++ local mongo_flag= ++ local port=27017 ++ local mongo_bin=mongo ++ /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' +++ echo :27017 +++ awk -F: '{print $2}' egrep: warning: egrep is obsolescent; using grep -E ++ suffix_port=27017 ++ [[ -z 27017 ]] +++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.YOmmL55fTY ++++ mktemp +++ local LAST_ERR=/tmp/tmp.0KlRAmCU2H +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.YOmmL55fTY +++ cat /tmp/tmp.0KlRAmCU2H +++ rm /tmp/tmp.YOmmL55fTY /tmp/tmp.0KlRAmCU2H +++ return 0 ++ local client_container=psmdb-client-696897d69b-8t86g ++ kubectl_bin exec psmdb-client-696897d69b-8t86g -- bash -c 'printf '\''rs.conf().members.length\n'\'' | mongo mongodb://clusterAdmin:clusterAdmin123456@34.118.225.150:27017/admin ' +++ mktemp ++ local LAST_OUT=/tmp/tmp.emzprZyPRT +++ mktemp ++ local LAST_ERR=/tmp/tmp.At0q9E9XVc ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl exec psmdb-client-696897d69b-8t86g -- bash -c 'printf '\''rs.conf().members.length\n'\'' | mongo mongodb://clusterAdmin:clusterAdmin123456@34.118.225.150:27017/admin ' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.emzprZyPRT ++ cat /tmp/tmp.At0q9E9XVc ++ rm /tmp/tmp.emzprZyPRT /tmp/tmp.At0q9E9XVc ++ return 0 + nodes_count=6 + echo -n 'waiting for all members to be configured in cfg' waiting for all members to be configured in cfg+ let retry+=1 + '[' 9 -ge 15 ']' + echo . . 
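The iterations above are one polling loop: wait_for_members asks the given endpoint for rs.conf().members.length every 10 seconds until the count reaches the target or 15 retries pass. Its shape can be read directly off the trace (the endpoint, rsName and target_count locals, the retry counter checked against 15, the trailing sleep). A sketch consistent with that trace; the give-up branch is an assumption, since this run never reaches it:

wait_for_members() {
    local endpoint="$1"
    local rsName="$2"
    local target_count="$3"
    local nodes_count=0
    local retry=0
    while [[ $nodes_count != "$target_count" ]]; do
        # ask a member of the replica set how many members its config lists
        nodes_count=$(run_mongos 'rs.conf().members.length' \
            "clusterAdmin:clusterAdmin123456@${endpoint}" mongodb :27017)
        echo -n "waiting for all members to be configured in ${rsName}"
        let retry+=1
        if [ $retry -ge 15 ]; then
            # assumed failure branch: the log above never gets this far
            echo "max retry count reached, members of ${rsName} are not configured" >&2
            exit 1
        fi
        echo .
        sleep 10
    done
}

The same loop is then reused per replica set, which is exactly the wait_for_members 34.118.229.83 rs0 6 call that opens the next poll below with a fresh nodes_count=0.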
+ sleep 10 + [[ 6 == 6 ]] + wait_for_members 34.118.229.83 rs0 6 + local endpoint=34.118.229.83 + local rsName=rs0 + local target_count=6 + local nodes_count=0 + [[ 0 == 6 ]] ++ run_mongos 'rs.conf().members.length' clusterAdmin:clusterAdmin123456@34.118.229.83 mongodb :27017 ++ local 'command=rs.conf().members.length' ++ local uri=clusterAdmin:clusterAdmin123456@34.118.229.83 ++ local driver=mongodb ++ local suffix=:27017 ++ local mongo_flag= ++ egrep -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|bye' ++ local port=27017 ++ local mongo_bin=mongo ++ /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' +++ echo :27017 +++ awk -F: '{print $2}' egrep: warning: egrep is obsolescent; using grep -E ++ suffix_port=27017 ++ [[ -z 27017 ]] +++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.GUVRMUqwJl ++++ mktemp +++ local LAST_ERR=/tmp/tmp.X5NM83LxmF +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.GUVRMUqwJl +++ cat /tmp/tmp.X5NM83LxmF +++ rm /tmp/tmp.GUVRMUqwJl /tmp/tmp.X5NM83LxmF +++ return 0 ++ local client_container=psmdb-client-696897d69b-8t86g ++ kubectl_bin exec psmdb-client-696897d69b-8t86g -- bash -c 'printf '\''rs.conf().members.length\n'\'' | mongo mongodb://clusterAdmin:clusterAdmin123456@34.118.229.83:27017/admin ' +++ mktemp ++ local LAST_OUT=/tmp/tmp.8opRbvSDmn +++ mktemp ++ local LAST_ERR=/tmp/tmp.eBcNP2aim6 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl exec psmdb-client-696897d69b-8t86g -- bash -c 'printf '\''rs.conf().members.length\n'\'' | mongo mongodb://clusterAdmin:clusterAdmin123456@34.118.229.83:27017/admin ' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.8opRbvSDmn ++ cat /tmp/tmp.eBcNP2aim6 ++ rm /tmp/tmp.8opRbvSDmn /tmp/tmp.eBcNP2aim6 ++ return 0 + nodes_count=6 + echo -n 'waiting for all members to be configured in rs0' waiting for all members to be configured in rs0+ let retry+=1 + '[' 10 -ge 15 ']' + echo . . 
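Before the rs1 poll below repeats the pattern once more, it is worth pinning down run_mongos itself. As traced, it derives the port from the suffix, locates the psmdb-client pod by label, pipes the command into the legacy mongo shell inside that pod, and filters the shell banner out of the result. A condensed sketch (quoting and mongo_flag handling are simplified); note that spelling the filter grep -Ev instead of egrep -v would also silence the "egrep is obsolescent" warning that every poll currently logs:

run_mongos() {
    local command="$1"
    local uri="$2"
    local driver="${3:-mongodb}"
    local suffix="${4:-.svc.cluster.local}"
    local mongo_flag="$5"
    local port="${6:-27017}"
    local mongo_bin="${7:-mongo}"
    # append the default port only when the suffix does not already carry one
    local suffix_port
    suffix_port=$(echo "$suffix" | awk -F: '{print $2}')
    if [[ -z $suffix_port ]]; then
        suffix="${suffix}:${port}"
    fi
    local client_container
    client_container=$(kubectl_bin get pods --selector=name=psmdb-client \
        -o 'jsonpath={.items[].metadata.name}')
    kubectl_bin exec "$client_container" -- \
        bash -c "printf '${command}\n' | ${mongo_bin} ${driver}://${uri}${suffix}/admin ${mongo_flag}" |
        grep -Ev 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|bye' |
        sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/'
}

Called as run_mongos 'rs.conf().members.length' "clusterAdmin:clusterAdmin123456@34.118.227.231" mongodb :27017, it yields the bare member count, which is the value wait_for_members compares against its target.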
+ sleep 10 + [[ 6 == 6 ]] + wait_for_members 34.118.227.231 rs1 6 + local endpoint=34.118.227.231 + local rsName=rs1 + local target_count=6 + local nodes_count=0 + [[ 0 == 6 ]] ++ run_mongos 'rs.conf().members.length' clusterAdmin:clusterAdmin123456@34.118.227.231 mongodb :27017 ++ local 'command=rs.conf().members.length' ++ local uri=clusterAdmin:clusterAdmin123456@34.118.227.231 ++ local driver=mongodb ++ local suffix=:27017 ++ local mongo_flag= ++ local port=27017 ++ local mongo_bin=mongo ++ egrep -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|bye' ++ /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' +++ echo :27017 +++ awk -F: '{print $2}' egrep: warning: egrep is obsolescent; using grep -E ++ suffix_port=27017 ++ [[ -z 27017 ]] +++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.ekRW0B66Bl ++++ mktemp +++ local LAST_ERR=/tmp/tmp.RifCXxMAQT +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.ekRW0B66Bl +++ cat /tmp/tmp.RifCXxMAQT +++ rm /tmp/tmp.ekRW0B66Bl /tmp/tmp.RifCXxMAQT +++ return 0 ++ local client_container=psmdb-client-696897d69b-8t86g ++ kubectl_bin exec psmdb-client-696897d69b-8t86g -- bash -c 'printf '\''rs.conf().members.length\n'\'' | mongo mongodb://clusterAdmin:clusterAdmin123456@34.118.227.231:27017/admin ' +++ mktemp ++ local LAST_OUT=/tmp/tmp.mBbvdJoEEG +++ mktemp ++ local LAST_ERR=/tmp/tmp.VOwLIyDUrB ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl exec psmdb-client-696897d69b-8t86g -- bash -c 'printf '\''rs.conf().members.length\n'\'' | mongo mongodb://clusterAdmin:clusterAdmin123456@34.118.227.231:27017/admin ' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.mBbvdJoEEG ++ cat /tmp/tmp.VOwLIyDUrB ++ rm /tmp/tmp.mBbvdJoEEG /tmp/tmp.VOwLIyDUrB ++ return 0 + nodes_count=6 + echo -n 'waiting for all members to be configured in rs1' waiting for all members to be configured in rs1+ let retry+=1 + '[' 11 -ge 15 ']' + echo . . + sleep 10 + [[ 6 == 6 ]] ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.rAyO41bPrf +++ mktemp ++ local LAST_ERR=/tmp/tmp.oP09kBpHHE ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.rAyO41bPrf ++ cat /tmp/tmp.oP09kBpHHE ++ rm /tmp/tmp.rAyO41bPrf /tmp/tmp.oP09kBpHHE ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-2135-b9c4516c-6-cluster3 --namespace=cross-site-sharded-replica-25250 ++ mktemp + local LAST_OUT=/tmp/tmp.yWbYSP0fFC ++ mktemp + local LAST_ERR=/tmp/tmp.Xl8GCfLGbt + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-2135-b9c4516c-6-cluster3 --namespace=cross-site-sharded-replica-25250 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.yWbYSP0fFC Context "gke_cloud-dev-112233_us-central1-a_jen-psmdb-2135-b9c4516c-6-cluster3" modified. 
+ cat /tmp/tmp.Xl8GCfLGbt + rm /tmp/tmp.yWbYSP0fFC /tmp/tmp.Xl8GCfLGbt + return 0 + desc 'check if all 3 Pods started' + set +o xtrace ----------------------------------------------------------------------------------- check if all 3 Pods started ----------------------------------------------------------------------------------- + wait_for_running cross-site-sharded-replica-rs0 3 + local name=cross-site-sharded-replica-rs0 + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=cross-site-sharded-replica ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod cross-site-sharded-replica-rs0-0 + local pod=cross-site-sharded-replica-rs0-0 + set +o xtrace waiting for pod/cross-site-sharded-replica-rs0-0 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod cross-site-sharded-replica-rs0-1 + local pod=cross-site-sharded-replica-rs0-1 + set +o xtrace waiting for pod/cross-site-sharded-replica-rs0-1 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb cross-site-sharded-replica -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.kPFhZxEuKD +++ mktemp ++ local LAST_ERR=/tmp/tmp.7mqEKhdy1s ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb cross-site-sharded-replica -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.kPFhZxEuKD ++ cat /tmp/tmp.7mqEKhdy1s ++ rm /tmp/tmp.kPFhZxEuKD /tmp/tmp.7mqEKhdy1s ++ return 0 + [[ false == \t\r\u\e ]] + wait_pod cross-site-sharded-replica-rs0-2 + local pod=cross-site-sharded-replica-rs0-2 + set +o xtrace waiting for pod/cross-site-sharded-replica-rs0-2 to be ready.OK ++ kubectl_bin get psmdb cross-site-sharded-replica -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.raPn01zWGd +++ mktemp ++ local LAST_ERR=/tmp/tmp.so9fGvDzbl ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb cross-site-sharded-replica -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.raPn01zWGd ++ cat /tmp/tmp.so9fGvDzbl ++ rm /tmp/tmp.raPn01zWGd /tmp/tmp.so9fGvDzbl ++ return 0 + [[ false == \t\r\u\e ]] ++ kubectl_bin get psmdb cross-site-sharded-replica -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.tTXsTyZPV1 +++ mktemp ++ local LAST_ERR=/tmp/tmp.kciMJUTVNm ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb cross-site-sharded-replica -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.tTXsTyZPV1 ++ cat /tmp/tmp.kciMJUTVNm ++ rm /tmp/tmp.tTXsTyZPV1 /tmp/tmp.kciMJUTVNm ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness + wait_for_running cross-site-sharded-replica-cfg 3 false + local name=cross-site-sharded-replica-cfg + let last_pod=2 + local check_cluster_readyness=false + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=cfg + local cluster_name=cross-site-sharded-replica ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod cross-site-sharded-replica-cfg-0 + local 
pod=cross-site-sharded-replica-cfg-0 + set +o xtrace waiting for pod/cross-site-sharded-replica-cfg-0 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod cross-site-sharded-replica-cfg-1 + local pod=cross-site-sharded-replica-cfg-1 + set +o xtrace waiting for pod/cross-site-sharded-replica-cfg-1 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb cross-site-sharded-replica -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.nmdVr65Abi +++ mktemp ++ local LAST_ERR=/tmp/tmp.pD5aV4UaJd ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb cross-site-sharded-replica -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.nmdVr65Abi ++ cat /tmp/tmp.pD5aV4UaJd ++ rm /tmp/tmp.nmdVr65Abi /tmp/tmp.pD5aV4UaJd ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod cross-site-sharded-replica-cfg-2 + local pod=cross-site-sharded-replica-cfg-2 + set +o xtrace waiting for pod/cross-site-sharded-replica-cfg-2 to be ready.OK ++ kubectl_bin get psmdb cross-site-sharded-replica -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.aMtXIGW6wh +++ mktemp ++ local LAST_ERR=/tmp/tmp.Q0zYSnH7O4 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb cross-site-sharded-replica -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.aMtXIGW6wh ++ cat /tmp/tmp.Q0zYSnH7O4 ++ rm /tmp/tmp.aMtXIGW6wh /tmp/tmp.Q0zYSnH7O4 ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb cross-site-sharded-replica -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.5Tu3rcH8VT +++ mktemp ++ local LAST_ERR=/tmp/tmp.3ajmoCIOXE ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb cross-site-sharded-replica -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.5Tu3rcH8VT ++ cat /tmp/tmp.3ajmoCIOXE ++ rm /tmp/tmp.5Tu3rcH8VT /tmp/tmp.3ajmoCIOXE ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ false == \t\r\u\e ]] + desc 'create user' + set +o xtrace ----------------------------------------------------------------------------------- create user ----------------------------------------------------------------------------------- + run_mongos 'db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})' userAdmin:userAdmin123456@cross-site-sharded-main-mongos.cross-site-sharded-19485 + local 'command=db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})' + local uri=userAdmin:userAdmin123456@cross-site-sharded-main-mongos.cross-site-sharded-19485 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local port=27017 + local mongo_bin=mongo ++ echo .svc.cluster.local ++ awk -F: '{print $2}' + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.NPGB2b7KOl +++ mktemp ++ local LAST_ERR=/tmp/tmp.c5vU3PUT5U ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods 
--selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.NPGB2b7KOl ++ cat /tmp/tmp.c5vU3PUT5U ++ rm /tmp/tmp.NPGB2b7KOl /tmp/tmp.c5vU3PUT5U ++ return 0 + local client_container=psmdb-client-696897d69b-7k87l + kubectl_bin exec psmdb-client-696897d69b-7k87l -- bash -c 'printf '\''db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})\n'\'' | mongo mongodb://userAdmin:userAdmin123456@cross-site-sharded-main-mongos.cross-site-sharded-19485.svc.cluster.local:27017/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.JfCX5JmgBH ++ mktemp + local LAST_ERR=/tmp/tmp.JjqI6k2aRI + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-7k87l -- bash -c 'printf '\''db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})\n'\'' | mongo mongodb://userAdmin:userAdmin123456@cross-site-sharded-main-mongos.cross-site-sharded-19485.svc.cluster.local:27017/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.JfCX5JmgBH Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://cross-site-sharded-main-mongos.cross-site-sharded-19485.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb Implicit session: session { "id" : UUID("69f4d8bc-0c0e-4291-bfe8-ec4656867d1d") } Percona Server for MongoDB server version: v8.0.16-5 WARNING: shell and server versions do not match Successfully added user: { "user" : "myApp", "roles" : [ { "db" : "myApp", "role" : "readWrite" } ] } bye + cat /tmp/tmp.JjqI6k2aRI + rm /tmp/tmp.JfCX5JmgBH /tmp/tmp.JjqI6k2aRI + return 0 + sleep 2 + desc 'write data, read from all' + set +o xtrace ----------------------------------------------------------------------------------- write data, read from all ----------------------------------------------------------------------------------- + run_mongos 'use myApp\n db.test.insert({ x: 100500 })' myApp:myPass@cross-site-sharded-main-mongos.cross-site-sharded-19485 + local 'command=use myApp\n db.test.insert({ x: 100500 })' + local uri=myApp:myPass@cross-site-sharded-main-mongos.cross-site-sharded-19485 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local port=27017 + local mongo_bin=mongo ++ echo .svc.cluster.local ++ awk -F: '{print $2}' + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.P8HAW44gpe +++ mktemp ++ local LAST_ERR=/tmp/tmp.s4AYLiZN9u ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.P8HAW44gpe ++ cat /tmp/tmp.s4AYLiZN9u ++ rm /tmp/tmp.P8HAW44gpe /tmp/tmp.s4AYLiZN9u ++ return 0 + local client_container=psmdb-client-696897d69b-7k87l + kubectl_bin exec psmdb-client-696897d69b-7k87l -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb://myApp:myPass@cross-site-sharded-main-mongos.cross-site-sharded-19485.svc.cluster.local:27017/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.SZfyIoTUBB ++ mktemp + local LAST_ERR=/tmp/tmp.f7bhFjMBHw + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-7k87l -- bash -c 'printf 
'\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb://myApp:myPass@cross-site-sharded-main-mongos.cross-site-sharded-19485.svc.cluster.local:27017/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.SZfyIoTUBB Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://cross-site-sharded-main-mongos.cross-site-sharded-19485.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb Implicit session: session { "id" : UUID("ea890589-7b61-49d5-8d70-5e34a8dbf078") } Percona Server for MongoDB server version: v8.0.16-5 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.f7bhFjMBHw + rm /tmp/tmp.SZfyIoTUBB /tmp/tmp.f7bhFjMBHw + return 0 + minikube_sleep + sleep_time=10 + [[ '' == 1 ]] + desc 'Compare data' + set +o xtrace ----------------------------------------------------------------------------------- Compare data ----------------------------------------------------------------------------------- + compare_mongos_cmd find myApp:myPass@cross-site-sharded-main-mongos.cross-site-sharded-19485 + local command=find + local uri=myApp:myPass@cross-site-sharded-main-mongos.cross-site-sharded-19485 + local postfix= + local suffix= + local database=myApp + local collection=test + local port=27017 + local tls=false + [[ false == \t\r\u\e ]] + mongos_command=run_mongos + log 'running db.test.command() in myApp' + set +o xtrace [2025-12-23T13:10:00+0000] running db.test.command() in myApp + run_mongos 'use myApp\n db.test.find()' myApp:myPass@cross-site-sharded-main-mongos.cross-site-sharded-19485 mongodb '' '' 27017 + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@cross-site-sharded-main-mongos.cross-site-sharded-19485 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local port=27017 + local mongo_bin=mongo + egrep -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ echo .svc.cluster.local ++ awk -F: '{print $2}' egrep: warning: egrep is obsolescent; using grep -E + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.UbVKBz3KZq +++ mktemp ++ local LAST_ERR=/tmp/tmp.fdjVnPAjdt ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.UbVKBz3KZq ++ cat /tmp/tmp.fdjVnPAjdt ++ rm /tmp/tmp.UbVKBz3KZq /tmp/tmp.fdjVnPAjdt ++ return 0 + local client_container=psmdb-client-696897d69b-7k87l + kubectl_bin exec psmdb-client-696897d69b-7k87l -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@cross-site-sharded-main-mongos.cross-site-sharded-19485.svc.cluster.local:27017/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.RU7DIWyWIg ++ mktemp + local LAST_ERR=/tmp/tmp.o0rXKVm1LR + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-7k87l -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo 
mongodb://myApp:myPass@cross-site-sharded-main-mongos.cross-site-sharded-19485.svc.cluster.local:27017/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.RU7DIWyWIg + cat /tmp/tmp.o0rXKVm1LR + rm /tmp/tmp.RU7DIWyWIg /tmp/tmp.o0rXKVm1LR + return 0 + [[ 0 -eq 0 ]] + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2135/e2e-tests/cross-site-sharded/compare/find.json /tmp/tmp.eJRyynawK0/find + desc 'test failover' + set +o xtrace ----------------------------------------------------------------------------------- test failover ----------------------------------------------------------------------------------- ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.4plHImnfbh +++ mktemp ++ local LAST_ERR=/tmp/tmp.EqluO1ael4 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.4plHImnfbh ++ cat /tmp/tmp.EqluO1ael4 ++ rm /tmp/tmp.4plHImnfbh /tmp/tmp.EqluO1ael4 ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-2135-b9c4516c-6-cluster3 --namespace=cross-site-sharded-19485 ++ mktemp + local LAST_OUT=/tmp/tmp.qiHJPUAsGn ++ mktemp + local LAST_ERR=/tmp/tmp.wPikBOqIek + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-2135-b9c4516c-6-cluster3 --namespace=cross-site-sharded-19485 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.qiHJPUAsGn Context "gke_cloud-dev-112233_us-central1-a_jen-psmdb-2135-b9c4516c-6-cluster3" modified. + cat /tmp/tmp.wPikBOqIek + rm /tmp/tmp.qiHJPUAsGn /tmp/tmp.wPikBOqIek + return 0 + kubectl_bin delete psmdb cross-site-sharded-main ++ mktemp + local LAST_OUT=/tmp/tmp.HEAwjpSC77 ++ mktemp + local LAST_ERR=/tmp/tmp.WgfLv7YQqU + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete psmdb cross-site-sharded-main + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.HEAwjpSC77 perconaservermongodb.psmdb.percona.com "cross-site-sharded-main" deleted from cross-site-sharded-19485 namespace + cat /tmp/tmp.WgfLv7YQqU + rm /tmp/tmp.HEAwjpSC77 /tmp/tmp.WgfLv7YQqU + return 0 + desc 'run disaster recovery script for replset: cfg' + set +o xtrace ----------------------------------------------------------------------------------- run disaster recovery script for replset: cfg ----------------------------------------------------------------------------------- + run_script_mongos /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2135/e2e-tests/cross-site-sharded/disaster_recovery.js clusterAdmin:clusterAdmin123456@34.118.225.150 mongodb :27017 + local script=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2135/e2e-tests/cross-site-sharded/disaster_recovery.js + local uri=clusterAdmin:clusterAdmin123456@34.118.225.150 + local driver=mongodb + local suffix=:27017 + local mongo_flag= + local mongo_bin=mongo ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.wSNu1Z1DSZ +++ mktemp ++ local LAST_ERR=/tmp/tmp.fV2E3EJwuh ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.wSNu1Z1DSZ ++ cat 
/tmp/tmp.fV2E3EJwuh
++ rm /tmp/tmp.wSNu1Z1DSZ /tmp/tmp.fV2E3EJwuh
++ return 0
+ local client_container=psmdb-client-696897d69b-8t86g
++ basename /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2135/e2e-tests/cross-site-sharded/disaster_recovery.js
+ name=disaster_recovery.js
+ kubectl_bin cp /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2135/e2e-tests/cross-site-sharded/disaster_recovery.js cross-site-sharded-19485/psmdb-client-696897d69b-8t86g:/tmp
++ mktemp
+ local LAST_OUT=/tmp/tmp.EO7sUJXzfA
++ mktemp
+ local LAST_ERR=/tmp/tmp.wUyCYAjjTP
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ kubectl cp /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2135/e2e-tests/cross-site-sharded/disaster_recovery.js cross-site-sharded-19485/psmdb-client-696897d69b-8t86g:/tmp
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.EO7sUJXzfA
+ cat /tmp/tmp.wUyCYAjjTP
+ rm /tmp/tmp.EO7sUJXzfA /tmp/tmp.wUyCYAjjTP
+ return 0
+ kubectl_bin exec psmdb-client-696897d69b-8t86g -- bash -c 'mongo mongodb://clusterAdmin:clusterAdmin123456@34.118.225.150:27017/admin /tmp/disaster_recovery.js'
++ mktemp
+ local LAST_OUT=/tmp/tmp.vfTo05KAvI
++ mktemp
+ local LAST_ERR=/tmp/tmp.N93LwCb0mm
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ kubectl exec psmdb-client-696897d69b-8t86g -- bash -c 'mongo mongodb://clusterAdmin:clusterAdmin123456@34.118.225.150:27017/admin /tmp/disaster_recovery.js'
+ exit_status=253
+ set -e
+ '[' 253 '!=' 0 -a -n 1 ']'
+ cat /tmp/tmp.vfTo05KAvI
Percona Server for MongoDB shell version v4.4.29-28
connecting to: mongodb://34.118.225.150:27017/admin?compressors=disabled&gssapiServiceName=mongodb
Implicit session: session { "id" : UUID("3fe3a5bd-d647-4502-a0f4-a1ab0ff4339b") }
Percona Server for MongoDB server version: v8.0.16-5
WARNING: shell and server versions do not match
uncaught exception: TypeError: member3 is undefined :
@/tmp/disaster_recovery.js:12:1
failed to load: /tmp/disaster_recovery.js
exiting with code -3
+ cat /tmp/tmp.N93LwCb0mm
command terminated with exit code 253
+ sleep 0
+ for i in $(seq 0 2)
+ set +e
+ kubectl exec psmdb-client-696897d69b-8t86g -- bash -c 'mongo mongodb://clusterAdmin:clusterAdmin123456@34.118.225.150:27017/admin /tmp/disaster_recovery.js'
+ exit_status=253
+ set -e
+ '[' 253 '!=' 0 -a -n 1 ']'
+ cat /tmp/tmp.vfTo05KAvI
Percona Server for MongoDB shell version v4.4.29-28
connecting to: mongodb://34.118.225.150:27017/admin?compressors=disabled&gssapiServiceName=mongodb
Implicit session: session { "id" : UUID("14e2c4f4-6b19-41a3-9372-8edb8d5cc9c3") }
Percona Server for MongoDB server version: v8.0.16-5
WARNING: shell and server versions do not match
uncaught exception: TypeError: member3 is undefined :
@/tmp/disaster_recovery.js:12:1
failed to load: /tmp/disaster_recovery.js
exiting with code -3
+ cat /tmp/tmp.N93LwCb0mm
command terminated with exit code 253
+ sleep 4
+ for i in $(seq 0 2)
+ set +e
+ kubectl exec psmdb-client-696897d69b-8t86g -- bash -c 'mongo mongodb://clusterAdmin:clusterAdmin123456@34.118.225.150:27017/admin /tmp/disaster_recovery.js'
+ exit_status=253
+ set -e
+ '[' 253 '!=' 0 -a -n 1 ']'
+ cat /tmp/tmp.vfTo05KAvI
Percona Server for MongoDB shell version v4.4.29-28
connecting to: mongodb://34.118.225.150:27017/admin?compressors=disabled&gssapiServiceName=mongodb
Implicit session: session { "id" : UUID("c1970d00-0ec0-4d9c-a0e7-e48bf6d9811e") }
Percona Server for MongoDB server version: v8.0.16-5
WARNING: shell and server versions do not match
uncaught exception: TypeError: member3 is undefined :
@/tmp/disaster_recovery.js:12:1
failed to load: /tmp/disaster_recovery.js
exiting with code -3
+ cat /tmp/tmp.N93LwCb0mm
command terminated with exit code 253
+ sleep 8
+ cat /tmp/tmp.vfTo05KAvI
Percona Server for MongoDB shell version v4.4.29-28
connecting to: mongodb://34.118.225.150:27017/admin?compressors=disabled&gssapiServiceName=mongodb
Implicit session: session { "id" : UUID("c1970d00-0ec0-4d9c-a0e7-e48bf6d9811e") }
Percona Server for MongoDB server version: v8.0.16-5
WARNING: shell and server versions do not match
uncaught exception: TypeError: member3 is undefined :
@/tmp/disaster_recovery.js:12:1
failed to load: /tmp/disaster_recovery.js
exiting with code -3
+ cat /tmp/tmp.N93LwCb0mm
command terminated with exit code 253
+ rm /tmp/tmp.vfTo05KAvI /tmp/tmp.N93LwCb0mm
+ return 253
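So the failover section ends with run_script_mongos exhausting all three retry attempts: every run of disaster_recovery.js dies at line 12 with TypeError: member3 is undefined, presumably because the script binds member3 to the fourth entry of the members array and the configuration it reads back at that moment does not provide one. The harness side of the call can be reconstructed from the trace; a sketch, with suffix handling simplified and the namespace parameter an assumption (in the run above it is cross-site-sharded-19485):

run_script_mongos() {
    local script="$1"
    local uri="$2"
    local mongo_bin="${3:-mongo}"
    local namespace="${4:-default}"   # assumed parameter; the trace shows cross-site-sharded-19485
    local client_container name
    client_container=$(kubectl_bin get pods --selector=name=psmdb-client \
        -o 'jsonpath={.items[].metadata.name}')
    name=$(basename "$script")
    # ship the script into the client pod, then run it against the given URI
    kubectl_bin cp "$script" "${namespace}/${client_container}:/tmp"
    kubectl_bin exec "$client_container" -- \
        bash -c "${mongo_bin} mongodb://${uri}/admin /tmp/${name}"
}

A triage step before the script would make this failure cheaper to read in the log. Hypothetical, not part of the test today: dump the member list the script is about to index, so whatever configuration it actually sees is on record before the TypeError:

# hypothetical pre-flight dump of the cfg members the script will operate on
run_mongos 'rs.conf().members.forEach(m => print(m._id + " " + m.host))' \
    "clusterAdmin:clusterAdmin123456@34.118.225.150" mongodb :27017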