Log: /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1545/e2e-tests/logs/data-sharded.log WARNING: version difference between client (1.30) and server (1.26) exceeds the supported minor version skew of +/-1 WARNING: version difference between client (1.30) and server (1.26) exceeds the supported minor version skew of +/-1 WARNING: version difference between client (1.30) and server (1.26) exceeds the supported minor version skew of +/-1 + main + [[ perconalab/percona-server-mongodb-operator:main-mongod7.0 == *\p\e\r\c\o\n\a\-\s\e\r\v\e\r\-\m\o\n\g\o\d\b\-\o\p\e\r\a\t\o\r* ]] ++ /usr/bin/sed -r 's/.*([0-9].[0-9])$/\1/' ++ echo -n perconalab/percona-server-mongodb-operator:main-mongod7.0 + MONGO_VER=7.0 + create_infra data-sharded-16822 + local ns=data-sharded-16822 + delete_crd + desc 'get and delete old CRDs and RBAC' + set +o xtrace ----------------------------------------------------------------------------------- get and delete old CRDs and RBAC ----------------------------------------------------------------------------------- + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1545/deploy/crd.yaml --ignore-not-found --wait=false ++ mktemp + local LAST_OUT=/tmp/tmp.ZZhnyjbSc2 ++ mktemp + local LAST_ERR=/tmp/tmp.cMFLN728Py + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1545/deploy/crd.yaml --ignore-not-found --wait=false + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.ZZhnyjbSc2 customresourcedefinition.apiextensions.k8s.io "perconaservermongodbbackups.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbrestores.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbs.psmdb.percona.com" deleted + cat /tmp/tmp.cMFLN728Py + rm /tmp/tmp.ZZhnyjbSc2 /tmp/tmp.cMFLN728Py + return 0 ++ yq eval .metadata.name /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1545/deploy/crd.yaml ++ grep -v '\-\-\-' + for crd_name in '$(yq eval '\''.metadata.name'\'' "${src_dir}/deploy/crd.yaml" | grep -v '\''\-\-\-'\'')' + kubectl get perconaservermongodbbackups.psmdb.percona.com --all-namespaces -o wide + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbbackups.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' + grep -v NAMESPACE error: the server doesn't have a resource type "perconaservermongodbbackups" + kubectl patch perconaservermongodbbackups.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbbackups" + : + kubectl_bin wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.bjhC9GmD0E ++ mktemp + local LAST_ERR=/tmp/tmp.g6XNDJ9ewz + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.bjhC9GmD0E + cat /tmp/tmp.g6XNDJ9ewz + rm /tmp/tmp.bjhC9GmD0E /tmp/tmp.g6XNDJ9ewz + return 0 + for crd_name in '$(yq eval '\''.metadata.name'\'' "${src_dir}/deploy/crd.yaml" | grep -v '\''\-\-\-'\'')' + kubectl get perconaservermongodbrestores.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbrestores.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: 
the server doesn't have a resource type "perconaservermongodbrestores" + kubectl patch perconaservermongodbrestores.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbrestores" + : + kubectl_bin wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.aNz4GQPX50 ++ mktemp + local LAST_ERR=/tmp/tmp.oKXYtxMODN + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.aNz4GQPX50 + cat /tmp/tmp.oKXYtxMODN + rm /tmp/tmp.aNz4GQPX50 /tmp/tmp.oKXYtxMODN + return 0 + for crd_name in '$(yq eval '\''.metadata.name'\'' "${src_dir}/deploy/crd.yaml" | grep -v '\''\-\-\-'\'')' + kubectl get perconaservermongodbs.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbs.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbs" + kubectl patch perconaservermongodbs.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbs" + : + kubectl_bin wait --for=delete crd perconaservermongodbs.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.UTmrdQflLm ++ mktemp + local LAST_ERR=/tmp/tmp.I159X2pMoD + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=delete crd perconaservermongodbs.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.UTmrdQflLm + cat /tmp/tmp.I159X2pMoD + rm /tmp/tmp.UTmrdQflLm /tmp/tmp.I159X2pMoD + return 0 + local rbac_yaml=rbac.yaml + '[' -n psmdb-operator ']' + rbac_yaml=cw-rbac.yaml + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1545/deploy/cw-rbac.yaml --ignore-not-found ++ mktemp + local LAST_OUT=/tmp/tmp.GBdurCRxX8 ++ mktemp + local LAST_ERR=/tmp/tmp.or04gJoocB + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1545/deploy/cw-rbac.yaml --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.GBdurCRxX8 clusterrole.rbac.authorization.k8s.io "percona-server-mongodb-operator" deleted clusterrolebinding.rbac.authorization.k8s.io "service-account-percona-server-mongodb-operator" deleted + cat /tmp/tmp.or04gJoocB + rm /tmp/tmp.GBdurCRxX8 /tmp/tmp.or04gJoocB + return 0 + check_crd_for_deletion PR-1545-63b8c179 + local git_tag=PR-1545-63b8c179 ++ yq eval .metadata.name ++ curl -s https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/PR-1545-63b8c179/deploy/crd.yaml ++ /usr/bin/sed s/---//g ++ /usr/bin/sed ':a;N;$!ba;s/\n/ /g' + for crd_name in '$(curl -s https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/${git_tag}/deploy/crd.yaml | yq eval '\''.metadata.name'\'' | $sed '\''s/---//g'\'' | $sed '\'':a;N;$!ba;s/\n/ /g'\'')' ++ kubectl_bin get crd/null -o 'jsonpath={.status.conditions[-1].type}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.HGdE9CxBfW +++ mktemp ++ local LAST_ERR=/tmp/tmp.6HORRzqX8J ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}' ++ 
exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.HGdE9CxBfW ++ cat /tmp/tmp.6HORRzqX8J Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ sleep 0 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}' ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.HGdE9CxBfW ++ cat /tmp/tmp.6HORRzqX8J Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ sleep 4 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}' ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.HGdE9CxBfW ++ cat /tmp/tmp.6HORRzqX8J Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ sleep 8 ++ cat /tmp/tmp.HGdE9CxBfW ++ cat /tmp/tmp.6HORRzqX8J Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ rm /tmp/tmp.HGdE9CxBfW /tmp/tmp.6HORRzqX8J ++ return 1 + [[ '' == \T\e\r\m\i\n\a\t\i\n\g ]] + '[' -n psmdb-operator ']' + create_namespace psmdb-operator + local namespace=psmdb-operator + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + destroy_chaos_mesh ++ sed s/NAMESPACE// ++ helm list --all-namespaces --filter chaos-mesh ++ tail -n1 ++ awk '-F ' '{print $2}' + local chaos_mesh_ns= + desc 'destroy chaos-mesh' + set +o xtrace ----------------------------------------------------------------------------------- destroy chaos-mesh ----------------------------------------------------------------------------------- + '[' -n '' ']' ++ kubectl get MutatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete MutatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ awk '{print $1}' ++ grep validate-auth + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl api-resources ++ grep chaos-mesh ++ awk '{print $1}' ++ kubectl get crd ++ grep chaos-mesh.org ++ awk '{print $1}' + timeout 30 kubectl delete crd error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrolebinding ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrolebinding error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrole ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrole error: resource(s) were provided, but no name was specified + : + desc 'cleaned up all old namespaces' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up all old namespaces ----------------------------------------------------------------------------------- + '[' -n '' ']' + desc 'cleaned up old namespaces psmdb-operator' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up old namespaces psmdb-operator ----------------------------------------------------------------------------------- + kubectl_bin delete namespace psmdb-operator --ignore-not-found ++ mktemp + awk '{print$1}' + egrep -v 
'^kube-|^default|Terminating|psmdb-operator|openshift|gke-mcs|^NAME' + local LAST_OUT=/tmp/tmp.dNffnS5mo8 + xargs kubectl delete ns + kubectl_bin get ns ++ mktemp + local LAST_ERR=/tmp/tmp.35R0GMV9WG + local exit_status=0 + local timeout=4 ++ seq 0 2 ++ mktemp + for i in '$(seq 0 2)' + set +e + kubectl delete namespace psmdb-operator --ignore-not-found + local LAST_OUT=/tmp/tmp.6h2pnobNfX ++ mktemp + local LAST_ERR=/tmp/tmp.rYuYJTVonA + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get ns + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.6h2pnobNfX + cat /tmp/tmp.rYuYJTVonA + rm /tmp/tmp.6h2pnobNfX /tmp/tmp.rYuYJTVonA + return 0 namespace "cert-manager" deleted namespace "data-sharded-28317" deleted + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.dNffnS5mo8 namespace "psmdb-operator" deleted + cat /tmp/tmp.35R0GMV9WG + rm /tmp/tmp.dNffnS5mo8 /tmp/tmp.35R0GMV9WG + return 0 + kubectl_bin wait --for=delete namespace psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.cq0CaAJoNn ++ mktemp + local LAST_ERR=/tmp/tmp.3OrEU1A28z + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=delete namespace psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.cq0CaAJoNn + cat /tmp/tmp.3OrEU1A28z + rm /tmp/tmp.cq0CaAJoNn /tmp/tmp.3OrEU1A28z + return 0 + desc 'create namespace psmdb-operator' + set +o xtrace ----------------------------------------------------------------------------------- create namespace psmdb-operator ----------------------------------------------------------------------------------- + kubectl_bin create namespace psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.s7xv1vgDQ4 ++ mktemp + local LAST_ERR=/tmp/tmp.K1Y6H9osj0 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl create namespace psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.s7xv1vgDQ4 namespace/psmdb-operator created + cat /tmp/tmp.K1Y6H9osj0 + rm /tmp/tmp.s7xv1vgDQ4 /tmp/tmp.K1Y6H9osj0 + return 0 ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.anpeM8bCY0 +++ mktemp ++ local LAST_ERR=/tmp/tmp.zUng5IJoTa ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.anpeM8bCY0 ++ cat /tmp/tmp.zUng5IJoTa ++ rm /tmp/tmp.anpeM8bCY0 /tmp/tmp.zUng5IJoTa ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-1545-63b8c179-7-cluster6 --namespace=psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.FpB9h4a8Yt ++ mktemp + local LAST_ERR=/tmp/tmp.YvGmSzrpz4 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-1545-63b8c179-7-cluster6 --namespace=psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.FpB9h4a8Yt Context "gke_cloud-dev-112233_us-central1-a_jen-psmdb-1545-63b8c179-7-cluster6" modified. 
+ cat /tmp/tmp.YvGmSzrpz4 + rm /tmp/tmp.FpB9h4a8Yt /tmp/tmp.YvGmSzrpz4 + return 0 + deploy_operator + desc 'start PSMDB operator' + set +o xtrace ----------------------------------------------------------------------------------- start PSMDB operator ----------------------------------------------------------------------------------- + local cr_file + '[' -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1545/e2e-tests/data-sharded/conf/crd.yaml ']' + cr_file=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1545/deploy/crd.yaml + kubectl_bin apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1545/deploy/crd.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.GH9UKKobrW ++ mktemp + local LAST_ERR=/tmp/tmp.Ms7OWNhnSm + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1545/deploy/crd.yaml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.GH9UKKobrW customresourcedefinition.apiextensions.k8s.io/perconaservermongodbbackups.psmdb.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaservermongodbrestores.psmdb.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaservermongodbs.psmdb.percona.com serverside-applied + cat /tmp/tmp.Ms7OWNhnSm + rm /tmp/tmp.GH9UKKobrW /tmp/tmp.Ms7OWNhnSm + return 0 + '[' -n psmdb-operator ']' + apply_rbac cw-rbac + local operator_namespace=psmdb-operator + local rbac=cw-rbac + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1545/deploy/cw-rbac.yaml + sed -e 's^namespace: .*^namespace: psmdb-operator^' + kubectl_bin apply -n psmdb-operator -f - ++ mktemp + local LAST_OUT=/tmp/tmp.0hLlZIwTZR ++ mktemp + local LAST_ERR=/tmp/tmp.4RYcW1qR36 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -n psmdb-operator -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.0hLlZIwTZR clusterrole.rbac.authorization.k8s.io/percona-server-mongodb-operator created serviceaccount/percona-server-mongodb-operator created clusterrolebinding.rbac.authorization.k8s.io/service-account-percona-server-mongodb-operator created + cat /tmp/tmp.4RYcW1qR36 + rm /tmp/tmp.0hLlZIwTZR /tmp/tmp.4RYcW1qR36 + return 0 + yq eval ' (.spec.template.spec.containers[].image = "perconalab/percona-server-mongodb-operator:PR-1545-63b8c179") | ((.. | select(.[] == "DISABLE_TELEMETRY")) |= .value="true") | ((.. 
| select(.[] == "LOG_LEVEL")) |= .value="DEBUG")' /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1545/deploy/cw-operator.yaml + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.wnws1kIOHi ++ mktemp + local LAST_ERR=/tmp/tmp.taOr899Ke0 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.wnws1kIOHi deployment.apps/percona-server-mongodb-operator created + cat /tmp/tmp.taOr899Ke0 + rm /tmp/tmp.wnws1kIOHi /tmp/tmp.taOr899Ke0 + return 0 + sleep 2 ++ get_operator_pod ++ kubectl_bin get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.MWy4kfjDYu +++ mktemp ++ local LAST_ERR=/tmp/tmp.egCe6aqSAR ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.MWy4kfjDYu ++ cat /tmp/tmp.egCe6aqSAR ++ rm /tmp/tmp.MWy4kfjDYu /tmp/tmp.egCe6aqSAR ++ return 0 + wait_pod percona-server-mongodb-operator-76d59f67c-j5b49 + local pod=percona-server-mongodb-operator-76d59f67c-j5b49 + set +o xtrace waiting for pod/percona-server-mongodb-operator-76d59f67c-j5b49 to be ready.OK + create_namespace data-sharded-16822 + local namespace=data-sharded-16822 + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + destroy_chaos_mesh ++ tail -n1 ++ helm list --all-namespaces --filter chaos-mesh ++ awk '-F ' '{print $2}' ++ sed s/NAMESPACE// + local chaos_mesh_ns= + desc 'destroy chaos-mesh' + set +o xtrace ----------------------------------------------------------------------------------- destroy chaos-mesh ----------------------------------------------------------------------------------- + '[' -n '' ']' ++ kubectl get MutatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete MutatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ grep chaos-mesh ++ kubectl get ValidatingWebhookConfiguration ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ awk '{print $1}' ++ grep validate-auth ++ kubectl get ValidatingWebhookConfiguration + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl api-resources ++ grep chaos-mesh ++ awk '{print $1}' ++ awk '{print $1}' ++ kubectl get crd ++ grep chaos-mesh.org + timeout 30 kubectl delete crd error: resource(s) were provided, but no name was specified + : ++ awk '{print $1}' ++ kubectl get clusterrolebinding ++ grep chaos-mesh + timeout 30 kubectl delete clusterrolebinding error: resource(s) were provided, but no name was specified + : ++ awk '{print $1}' ++ kubectl get clusterrole ++ grep chaos-mesh + timeout 30 kubectl delete clusterrole error: resource(s) were provided, but no name was specified + : + desc 'cleaned up all old namespaces' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up all old namespaces ----------------------------------------------------------------------------------- + '[' -n '' ']' + desc 'cleaned up old namespaces data-sharded-16822' + set +o xtrace 
----------------------------------------------------------------------------------- cleaned up old namespaces data-sharded-16822 ----------------------------------------------------------------------------------- + kubectl_bin delete namespace data-sharded-16822 --ignore-not-found ++ mktemp + kubectl_bin get ns ++ mktemp + xargs kubectl delete ns + awk '{print$1}' + local LAST_OUT=/tmp/tmp.jQZeHhuzEw + local LAST_OUT=/tmp/tmp.Sr1GvoBIWP ++ mktemp ++ mktemp + local LAST_ERR=/tmp/tmp.nGzguckGE8 + local exit_status=0 + local timeout=4 + local LAST_ERR=/tmp/tmp.Fm2v8iTZxn + local exit_status=0 + local timeout=4 + egrep -v '^kube-|^default|Terminating|psmdb-operator|openshift|gke-mcs|^NAME' ++ seq 0 2 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get ns + for i in '$(seq 0 2)' + set +e + kubectl delete namespace data-sharded-16822 --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.jQZeHhuzEw + cat /tmp/tmp.nGzguckGE8 + rm /tmp/tmp.jQZeHhuzEw /tmp/tmp.nGzguckGE8 + return 0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Sr1GvoBIWP + cat /tmp/tmp.Fm2v8iTZxn + rm /tmp/tmp.Sr1GvoBIWP /tmp/tmp.Fm2v8iTZxn + return 0 + kubectl_bin wait --for=delete namespace data-sharded-16822 ++ mktemp + local LAST_OUT=/tmp/tmp.YHhrsb9prY ++ mktemp + local LAST_ERR=/tmp/tmp.xm5PUTaVsp + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=delete namespace data-sharded-16822 error: resource(s) were provided, but no name was specified + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.YHhrsb9prY + cat /tmp/tmp.xm5PUTaVsp + rm /tmp/tmp.YHhrsb9prY /tmp/tmp.xm5PUTaVsp + return 0 + desc 'create namespace data-sharded-16822' + set +o xtrace ----------------------------------------------------------------------------------- create namespace data-sharded-16822 ----------------------------------------------------------------------------------- + kubectl_bin create namespace data-sharded-16822 ++ mktemp + local LAST_OUT=/tmp/tmp.KJWIaCxkan ++ mktemp + local LAST_ERR=/tmp/tmp.X75MRAmRow + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl create namespace data-sharded-16822 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.KJWIaCxkan namespace/data-sharded-16822 created + cat /tmp/tmp.X75MRAmRow + rm /tmp/tmp.KJWIaCxkan /tmp/tmp.X75MRAmRow + return 0 ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.v9e1uQq2lg +++ mktemp ++ local LAST_ERR=/tmp/tmp.FsaI3c17gO ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.v9e1uQq2lg ++ cat /tmp/tmp.FsaI3c17gO ++ rm /tmp/tmp.v9e1uQq2lg /tmp/tmp.FsaI3c17gO ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-1545-63b8c179-7-cluster6 --namespace=data-sharded-16822 ++ mktemp + local LAST_OUT=/tmp/tmp.zhmzVeXjws ++ mktemp + local LAST_ERR=/tmp/tmp.hzfJrtJSK8 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-1545-63b8c179-7-cluster6 --namespace=data-sharded-16822 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.zhmzVeXjws Context "gke_cloud-dev-112233_us-central1-a_jen-psmdb-1545-63b8c179-7-cluster6" modified. 
+ cat /tmp/tmp.hzfJrtJSK8 + rm /tmp/tmp.zhmzVeXjws /tmp/tmp.hzfJrtJSK8 + return 0 + deploy_cert_manager + desc 'deploy cert manager' + set +o xtrace ----------------------------------------------------------------------------------- deploy cert manager ----------------------------------------------------------------------------------- + kubectl_bin create namespace cert-manager ++ mktemp + local LAST_OUT=/tmp/tmp.JAkmsj5IPS ++ mktemp + local LAST_ERR=/tmp/tmp.dzH54h9xwe + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl create namespace cert-manager + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.JAkmsj5IPS namespace/cert-manager created + cat /tmp/tmp.dzH54h9xwe + rm /tmp/tmp.JAkmsj5IPS /tmp/tmp.dzH54h9xwe + return 0 + kubectl_bin label namespace cert-manager certmanager.k8s.io/disable-validation=true ++ mktemp + local LAST_OUT=/tmp/tmp.1feAovxpwx ++ mktemp + local LAST_ERR=/tmp/tmp.thsGJkz4WU + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl label namespace cert-manager certmanager.k8s.io/disable-validation=true + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.1feAovxpwx namespace/cert-manager labeled + cat /tmp/tmp.thsGJkz4WU + rm /tmp/tmp.1feAovxpwx /tmp/tmp.thsGJkz4WU + return 0 + kubectl_bin apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.12.4/cert-manager.yaml --validate=false ++ mktemp + local LAST_OUT=/tmp/tmp.0TdZgyOZpM ++ mktemp + local LAST_ERR=/tmp/tmp.Yb276YpGYm + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.12.4/cert-manager.yaml --validate=false + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.0TdZgyOZpM namespace/cert-manager configured customresourcedefinition.apiextensions.k8s.io/certificaterequests.cert-manager.io unchanged customresourcedefinition.apiextensions.k8s.io/certificates.cert-manager.io unchanged customresourcedefinition.apiextensions.k8s.io/challenges.acme.cert-manager.io unchanged customresourcedefinition.apiextensions.k8s.io/clusterissuers.cert-manager.io unchanged customresourcedefinition.apiextensions.k8s.io/issuers.cert-manager.io unchanged customresourcedefinition.apiextensions.k8s.io/orders.acme.cert-manager.io unchanged serviceaccount/cert-manager-cainjector created serviceaccount/cert-manager created serviceaccount/cert-manager-webhook created configmap/cert-manager-webhook created clusterrole.rbac.authorization.k8s.io/cert-manager-cainjector unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-issuers unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-clusterissuers unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-certificates unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-orders unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-challenges unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-ingress-shim unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-view unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-edit unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-approve:cert-manager-io unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-certificatesigningrequests unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-webhook:subjectaccessreviews 
unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-cainjector unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-issuers unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-clusterissuers unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-certificates unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-orders unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-challenges unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-ingress-shim unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-approve:cert-manager-io unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-certificatesigningrequests unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-webhook:subjectaccessreviews configured role.rbac.authorization.k8s.io/cert-manager-cainjector:leaderelection unchanged role.rbac.authorization.k8s.io/cert-manager:leaderelection unchanged role.rbac.authorization.k8s.io/cert-manager-webhook:dynamic-serving created rolebinding.rbac.authorization.k8s.io/cert-manager-cainjector:leaderelection unchanged rolebinding.rbac.authorization.k8s.io/cert-manager:leaderelection configured rolebinding.rbac.authorization.k8s.io/cert-manager-webhook:dynamic-serving created service/cert-manager created service/cert-manager-webhook created deployment.apps/cert-manager-cainjector created deployment.apps/cert-manager created deployment.apps/cert-manager-webhook created mutatingwebhookconfiguration.admissionregistration.k8s.io/cert-manager-webhook configured validatingwebhookconfiguration.admissionregistration.k8s.io/cert-manager-webhook configured + cat /tmp/tmp.Yb276YpGYm Warning: resource namespaces/cert-manager is missing the kubectl.kubernetes.io/last-applied-configuration annotation which is required by kubectl apply. kubectl apply should only be used on resources created declaratively by either kubectl create --save-config or kubectl apply. The missing annotation will be patched automatically. 
+ rm /tmp/tmp.0TdZgyOZpM /tmp/tmp.Yb276YpGYm + return 0 + kubectl_bin -n cert-manager wait pod -l app.kubernetes.io/instance=cert-manager --for=condition=ready ++ mktemp + local LAST_OUT=/tmp/tmp.T70JulRUTB ++ mktemp + local LAST_ERR=/tmp/tmp.0rNR4DaZv7 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl -n cert-manager wait pod -l app.kubernetes.io/instance=cert-manager --for=condition=ready + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.T70JulRUTB pod/cert-manager-6b8456bfd4-47css condition met pod/cert-manager-cainjector-79495bdbc8-bmjwb condition met pod/cert-manager-webhook-56fc7669b6-wfxdx condition met + cat /tmp/tmp.0rNR4DaZv7 + rm /tmp/tmp.T70JulRUTB /tmp/tmp.0rNR4DaZv7 + return 0 + sleep 120 + desc 'create secrets and start client' + set +o xtrace ----------------------------------------------------------------------------------- create secrets and start client ----------------------------------------------------------------------------------- + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1545/e2e-tests/conf/secrets.yml ++ mktemp + local LAST_OUT=/tmp/tmp.hhjUL82LB8 ++ mktemp + local LAST_ERR=/tmp/tmp.dqgKHYsXZ8 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1545/e2e-tests/conf/secrets.yml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.hhjUL82LB8 secret/some-users created + cat /tmp/tmp.dqgKHYsXZ8 + rm /tmp/tmp.hhjUL82LB8 /tmp/tmp.dqgKHYsXZ8 + return 0 + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1545/e2e-tests/conf/client_with_tls.yml ++ mktemp + local LAST_OUT=/tmp/tmp.i01PovgY7d ++ mktemp + local LAST_ERR=/tmp/tmp.z6eGPof1xa + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1545/e2e-tests/conf/client_with_tls.yml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.i01PovgY7d deployment.apps/psmdb-client created + cat /tmp/tmp.z6eGPof1xa + rm /tmp/tmp.i01PovgY7d /tmp/tmp.z6eGPof1xa + return 0 + cluster=some-name + desc 'create first PSMDB cluster some-name' + set +o xtrace ----------------------------------------------------------------------------------- create first PSMDB cluster some-name ----------------------------------------------------------------------------------- + apply_cluster /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1545/e2e-tests/data-sharded/conf/some-name.yml + '[' -z '' ']' + cat_config /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1545/e2e-tests/data-sharded/conf/some-name.yml + kubectl_bin apply -f - + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1545/e2e-tests/data-sharded/conf/some-name.yml ++ mktemp + yq eval '(.spec | select(.image == null)).image = "perconalab/percona-server-mongodb-operator:main-mongod7.0"' + yq eval '(.spec | select(has("pmm"))).pmm.image = "perconalab/pmm-client:dev-latest"' + local LAST_OUT=/tmp/tmp.OsUoeRpn14 + yq eval '.spec.upgradeOptions.apply="Never"' ++ mktemp + local LAST_ERR=/tmp/tmp.NQhOo9I7oJ + local exit_status=0 + local timeout=4 + yq eval '(.spec | select(has("backup"))).backup.image = "perconalab/percona-server-mongodb-operator:main-backup"' + yq eval '(.spec | select(has("initImage"))).initImage = "perconalab/percona-server-mongodb-operator:PR-1545-63b8c179"' ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 
+ set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.OsUoeRpn14 perconaservermongodb.psmdb.percona.com/some-name created + cat /tmp/tmp.NQhOo9I7oJ + rm /tmp/tmp.OsUoeRpn14 /tmp/tmp.NQhOo9I7oJ + return 0 + desc 'check if all Pods started' + set +o xtrace ----------------------------------------------------------------------------------- check if all Pods started ----------------------------------------------------------------------------------- + wait_for_running some-name-cfg 3 + local name=some-name-cfg + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=cfg + local cluster_name=some-name ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + [[ 0 -eq 2 ]] + wait_pod some-name-cfg-0 + local pod=some-name-cfg-0 + set +o xtrace waiting for pod/some-name-cfg-0 to be ready..........OK + for i in '$(seq 0 $last_pod)' + [[ 1 -eq 2 ]] + wait_pod some-name-cfg-1 + local pod=some-name-cfg-1 + set +o xtrace waiting for pod/some-name-cfg-1 to be ready...........OK + for i in '$(seq 0 $last_pod)' + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.lRbAgTCcDi +++ mktemp ++ local LAST_ERR=/tmp/tmp.PxMCXlQ71a ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.lRbAgTCcDi ++ cat /tmp/tmp.PxMCXlQ71a ++ rm /tmp/tmp.lRbAgTCcDi /tmp/tmp.PxMCXlQ71a ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-cfg-2 + local pod=some-name-cfg-2 + set +o xtrace waiting for pod/some-name-cfg-2 to be ready...........OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].non_voting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.TWwIxwLls7 +++ mktemp ++ local LAST_ERR=/tmp/tmp.fF32d17AN3 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].non_voting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.TWwIxwLls7 ++ cat /tmp/tmp.fF32d17AN3 ++ rm /tmp/tmp.TWwIxwLls7 /tmp/tmp.fF32d17AN3 ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness......................................... 
+ wait_for_running some-name-rs0 3 + local name=some-name-rs0 + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=some-name ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + [[ 0 -eq 2 ]] + wait_pod some-name-rs0-0 + local pod=some-name-rs0-0 + set +o xtrace waiting for pod/some-name-rs0-0 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 1 -eq 2 ]] + wait_pod some-name-rs0-1 + local pod=some-name-rs0-1 + set +o xtrace waiting for pod/some-name-rs0-1 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.lt7zHJT7Pr +++ mktemp ++ local LAST_ERR=/tmp/tmp.VpgBSBJzoR ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.lt7zHJT7Pr ++ cat /tmp/tmp.VpgBSBJzoR ++ rm /tmp/tmp.lt7zHJT7Pr /tmp/tmp.VpgBSBJzoR ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-rs0-2 + local pod=some-name-rs0-2 + set +o xtrace waiting for pod/some-name-rs0-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.TnNUzKzvQH +++ mktemp ++ local LAST_ERR=/tmp/tmp.gHWDYaL5gK ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.TnNUzKzvQH ++ cat /tmp/tmp.gHWDYaL5gK ++ rm /tmp/tmp.TnNUzKzvQH /tmp/tmp.gHWDYaL5gK ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness + wait_for_running some-name-rs1 3 + local name=some-name-rs1 + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs1 + local cluster_name=some-name ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + [[ 0 -eq 2 ]] + wait_pod some-name-rs1-0 + local pod=some-name-rs1-0 + set +o xtrace waiting for pod/some-name-rs1-0 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 1 -eq 2 ]] + wait_pod some-name-rs1-1 + local pod=some-name-rs1-1 + set +o xtrace waiting for pod/some-name-rs1-1 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs1")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.yyMzgj9s1g +++ mktemp ++ local LAST_ERR=/tmp/tmp.AjQbj9H0rH ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs1")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.yyMzgj9s1g ++ cat /tmp/tmp.AjQbj9H0rH ++ rm /tmp/tmp.yyMzgj9s1g /tmp/tmp.AjQbj9H0rH ++ return 0 + [[ true == \t\r\u\e ]] + wait_pod some-name-rs1-arbiter-0 + local pod=some-name-rs1-arbiter-0 + set +o xtrace waiting for pod/some-name-rs1-arbiter-0 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs1")].non_voting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.AAGIPxmN7B +++ mktemp ++ local LAST_ERR=/tmp/tmp.m8vd3HQDue ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i 
in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs1")].non_voting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.AAGIPxmN7B ++ cat /tmp/tmp.m8vd3HQDue ++ rm /tmp/tmp.AAGIPxmN7B /tmp/tmp.m8vd3HQDue ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness + wait_for_running some-name-rs2 3 + local name=some-name-rs2 + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs2 + local cluster_name=some-name ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + [[ 0 -eq 2 ]] + wait_pod some-name-rs2-0 + local pod=some-name-rs2-0 + set +o xtrace waiting for pod/some-name-rs2-0 to be ready...................................................................OK + for i in '$(seq 0 $last_pod)' + [[ 1 -eq 2 ]] + wait_pod some-name-rs2-1 + local pod=some-name-rs2-1 + set +o xtrace waiting for pod/some-name-rs2-1 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs2")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.zpLM5V03QK +++ mktemp ++ local LAST_ERR=/tmp/tmp.RzmhsZc0Mh ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs2")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.zpLM5V03QK ++ cat /tmp/tmp.RzmhsZc0Mh ++ rm /tmp/tmp.zpLM5V03QK /tmp/tmp.RzmhsZc0Mh ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-rs2-2 + local pod=some-name-rs2-2 + set +o xtrace waiting for pod/some-name-rs2-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs2")].non_voting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.yiFxAnCAqK +++ mktemp ++ local LAST_ERR=/tmp/tmp.onKKUBkBqV ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs2")].non_voting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.yiFxAnCAqK ++ cat /tmp/tmp.onKKUBkBqV ++ rm /tmp/tmp.yiFxAnCAqK /tmp/tmp.onKKUBkBqV ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness + wait_for_running some-name-mongos 3 + local name=some-name-mongos + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=mongos + local cluster_name=some-name ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + [[ 0 -eq 2 ]] + wait_pod some-name-mongos-0 + local pod=some-name-mongos-0 + set +o xtrace waiting for pod/some-name-mongos-0 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 1 -eq 2 ]] + wait_pod some-name-mongos-1 + local pod=some-name-mongos-1 + set +o xtrace waiting for pod/some-name-mongos-1 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.YUVUKjX5tI +++ mktemp ++ local LAST_ERR=/tmp/tmp.6e73aQX1xT ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.YUVUKjX5tI ++ cat 
/tmp/tmp.6e73aQX1xT ++ rm /tmp/tmp.YUVUKjX5tI /tmp/tmp.6e73aQX1xT ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-mongos-2 + local pod=some-name-mongos-2 + set +o xtrace waiting for pod/some-name-mongos-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].non_voting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.dYADXJonyQ +++ mktemp ++ local LAST_ERR=/tmp/tmp.mr2O3wYeDh ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].non_voting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.dYADXJonyQ ++ cat /tmp/tmp.mr2O3wYeDh ++ rm /tmp/tmp.dYADXJonyQ /tmp/tmp.mr2O3wYeDh ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness + desc 'create user' + set +o xtrace ----------------------------------------------------------------------------------- create user ----------------------------------------------------------------------------------- + run_mongos 'db.createUser({user:"user",pwd:"pass",roles:[{db:"app",role:"readWrite"}]})' userAdmin:userAdmin123456@some-name-mongos.data-sharded-16822 mongodb .svc.cluster.local '--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' + local 'command=db.createUser({user:"user",pwd:"pass",roles:[{db:"app",role:"readWrite"}]})' + local uri=userAdmin:userAdmin123456@some-name-mongos.data-sharded-16822 + local driver=mongodb + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.av6hn2C0j9 +++ mktemp ++ local LAST_ERR=/tmp/tmp.ZIKV7OO6YX ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.av6hn2C0j9 ++ cat /tmp/tmp.ZIKV7OO6YX ++ rm /tmp/tmp.av6hn2C0j9 /tmp/tmp.ZIKV7OO6YX ++ return 0 + local client_container=psmdb-client-5f578b7f94-fwqwf + local 'mongo_flag=--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' + kubectl_bin exec psmdb-client-5f578b7f94-fwqwf -- bash -c 'printf '\''db.createUser({user:"user",pwd:"pass",roles:[{db:"app",role:"readWrite"}]})\n'\'' | mongo mongodb://userAdmin:userAdmin123456@some-name-mongos.data-sharded-16822.svc.cluster.local/admin --tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' ++ mktemp + local LAST_OUT=/tmp/tmp.ju7kBfwk4D ++ mktemp + local LAST_ERR=/tmp/tmp.5p2cSDXQZW + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-5f578b7f94-fwqwf -- bash -c 'printf '\''db.createUser({user:"user",pwd:"pass",roles:[{db:"app",role:"readWrite"}]})\n'\'' | mongo mongodb://userAdmin:userAdmin123456@some-name-mongos.data-sharded-16822.svc.cluster.local/admin --tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.ju7kBfwk4D Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-mongos.data-sharded-16822.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb {"t":{"$date":"2024-05-07T18:18:03.017Z"},"s":"I", "c":"NETWORK", "id":5490002, "ctx":"thread1","msg":"Started a new thread for 
the timer service"} Implicit session: session { "id" : UUID("7247ce29-a7f7-4bfa-9e2d-3724db2086de") } Percona Server for MongoDB server version: v7.0.8-5 WARNING: shell and server versions do not match Successfully added user: { "user" : "user", "roles" : [ { "db" : "app", "role" : "readWrite" } ] } bye + cat /tmp/tmp.5p2cSDXQZW + rm /tmp/tmp.ju7kBfwk4D /tmp/tmp.5p2cSDXQZW + return 0 + sleep 2 + desc 'set chunk size to 32 MB' + set +o xtrace ----------------------------------------------------------------------------------- set chunk size to 32 MB ----------------------------------------------------------------------------------- + run_mongos 'use config\n db.settings.save( { _id:"chunksize", value: 32 } )' clusterAdmin:clusterAdmin123456@some-name-mongos.data-sharded-16822 mongodb .svc.cluster.local '--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' + local 'command=use config\n db.settings.save( { _id:"chunksize", value: 32 } )' + local uri=clusterAdmin:clusterAdmin123456@some-name-mongos.data-sharded-16822 + local driver=mongodb + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.a0P30Mq2Pn +++ mktemp ++ local LAST_ERR=/tmp/tmp.e0TpkNREHa ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.a0P30Mq2Pn ++ cat /tmp/tmp.e0TpkNREHa ++ rm /tmp/tmp.a0P30Mq2Pn /tmp/tmp.e0TpkNREHa ++ return 0 + local client_container=psmdb-client-5f578b7f94-fwqwf + local 'mongo_flag=--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' + kubectl_bin exec psmdb-client-5f578b7f94-fwqwf -- bash -c 'printf '\''use config\n db.settings.save( { _id:"chunksize", value: 32 } )\n'\'' | mongo mongodb://clusterAdmin:clusterAdmin123456@some-name-mongos.data-sharded-16822.svc.cluster.local/admin --tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' ++ mktemp + local LAST_OUT=/tmp/tmp.RxxHFFSWWR ++ mktemp + local LAST_ERR=/tmp/tmp.ojYtOnwIzo + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-5f578b7f94-fwqwf -- bash -c 'printf '\''use config\n db.settings.save( { _id:"chunksize", value: 32 } )\n'\'' | mongo mongodb://clusterAdmin:clusterAdmin123456@some-name-mongos.data-sharded-16822.svc.cluster.local/admin --tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.RxxHFFSWWR Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-mongos.data-sharded-16822.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb {"t":{"$date":"2024-05-07T18:18:08.459Z"},"s":"I", "c":"NETWORK", "id":5490002, "ctx":"thread1","msg":"Started a new thread for the timer service"} Implicit session: session { "id" : UUID("fa1a3ed8-908b-4e48-b0c0-477bc94399b6") } Percona Server for MongoDB server version: v7.0.8-5 WARNING: shell and server versions do not match switched to db config WriteResult({ "nMatched" : 0, "nUpserted" : 1, "nModified" : 0, "_id" : "chunksize" }) bye + cat /tmp/tmp.ojYtOnwIzo + rm /tmp/tmp.RxxHFFSWWR /tmp/tmp.ojYtOnwIzo + return 0 + sleep 2 + desc 'write data' + set +o xtrace 
----------------------------------------------------------------------------------- write data ----------------------------------------------------------------------------------- + run_script_mongos /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1545/e2e-tests/data-sharded/data.js user:pass@some-name-mongos.data-sharded-16822 mongodb .svc.cluster.local '--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' + local script=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1545/e2e-tests/data-sharded/data.js + local uri=user:pass@some-name-mongos.data-sharded-16822 + local driver=mongodb + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.jhTpecSCbw +++ mktemp ++ local LAST_ERR=/tmp/tmp.3H4VPopRrN ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.jhTpecSCbw ++ cat /tmp/tmp.3H4VPopRrN ++ rm /tmp/tmp.jhTpecSCbw /tmp/tmp.3H4VPopRrN ++ return 0 + local client_container=psmdb-client-5f578b7f94-fwqwf + local 'mongo_flag=--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' ++ basename /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1545/e2e-tests/data-sharded/data.js + name=data.js + kubectl_bin cp /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1545/e2e-tests/data-sharded/data.js data-sharded-16822/psmdb-client-5f578b7f94-fwqwf:/tmp ++ mktemp + local LAST_OUT=/tmp/tmp.0GIxHuaNGc ++ mktemp + local LAST_ERR=/tmp/tmp.Ynfq9WCxL9 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl cp /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1545/e2e-tests/data-sharded/data.js data-sharded-16822/psmdb-client-5f578b7f94-fwqwf:/tmp + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.0GIxHuaNGc + cat /tmp/tmp.Ynfq9WCxL9 + rm /tmp/tmp.0GIxHuaNGc /tmp/tmp.Ynfq9WCxL9 + return 0 + kubectl_bin exec psmdb-client-5f578b7f94-fwqwf -- bash -c 'mongo mongodb://user:pass@some-name-mongos.data-sharded-16822.svc.cluster.local/admin --tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls /tmp/data.js' ++ mktemp + local LAST_OUT=/tmp/tmp.aIPXyUFJKg ++ mktemp + local LAST_ERR=/tmp/tmp.7dfLq6Ci6d + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-5f578b7f94-fwqwf -- bash -c 'mongo mongodb://user:pass@some-name-mongos.data-sharded-16822.svc.cluster.local/admin --tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls /tmp/data.js' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.aIPXyUFJKg Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-mongos.data-sharded-16822.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb {"t":{"$date":"2024-05-07T18:18:17.019Z"},"s":"I", "c":"NETWORK", "id":5490002, "ctx":"thread1","msg":"Started a new thread for the timer service"} Implicit session: session { "id" : UUID("e2a8cb5e-4caa-4dc0-b081-7b410721a66d") } Percona Server for MongoDB server version: v7.0.8-5 WARNING: shell and server versions do not match + cat /tmp/tmp.7dfLq6Ci6d + rm /tmp/tmp.aIPXyUFJKg /tmp/tmp.7dfLq6Ci6d + return 0 + desc 'shard collection' + set +o xtrace 
----------------------------------------------------------------------------------- shard collection ----------------------------------------------------------------------------------- + run_mongos 'sh.enableSharding("app")' clusterAdmin:clusterAdmin123456@some-name-mongos.data-sharded-16822 mongodb .svc.cluster.local '--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' + local 'command=sh.enableSharding("app")' + local uri=clusterAdmin:clusterAdmin123456@some-name-mongos.data-sharded-16822 + local driver=mongodb + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.kjaevzmkxz +++ mktemp ++ local LAST_ERR=/tmp/tmp.wWrEEQt0Tm ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.kjaevzmkxz ++ cat /tmp/tmp.wWrEEQt0Tm ++ rm /tmp/tmp.kjaevzmkxz /tmp/tmp.wWrEEQt0Tm ++ return 0 + local client_container=psmdb-client-5f578b7f94-fwqwf + local 'mongo_flag=--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' + kubectl_bin exec psmdb-client-5f578b7f94-fwqwf -- bash -c 'printf '\''sh.enableSharding("app")\n'\'' | mongo mongodb://clusterAdmin:clusterAdmin123456@some-name-mongos.data-sharded-16822.svc.cluster.local/admin --tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' ++ mktemp + local LAST_OUT=/tmp/tmp.LfOe7jHRZc ++ mktemp + local LAST_ERR=/tmp/tmp.ai76NVYKkp + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-5f578b7f94-fwqwf -- bash -c 'printf '\''sh.enableSharding("app")\n'\'' | mongo mongodb://clusterAdmin:clusterAdmin123456@some-name-mongos.data-sharded-16822.svc.cluster.local/admin --tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.LfOe7jHRZc Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-mongos.data-sharded-16822.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb {"t":{"$date":"2024-05-07T18:24:12.798Z"},"s":"I", "c":"NETWORK", "id":5490002, "ctx":"thread1","msg":"Started a new thread for the timer service"} Implicit session: session { "id" : UUID("06dd70b3-e011-4fcd-9250-34dd03019ad5") } Percona Server for MongoDB server version: v7.0.8-5 WARNING: shell and server versions do not match { "ok" : 1, "$clusterTime" : { "clusterTime" : Timestamp(1715106252, 2), "signature" : { "hash" : BinData(0,"unaebWjqypGPFWB7LJLVzPQNjgU="), "keyId" : NumberLong("7366322220668289040") } }, "operationTime" : Timestamp(1715106252, 2) } bye + cat /tmp/tmp.ai76NVYKkp + rm /tmp/tmp.LfOe7jHRZc /tmp/tmp.ai76NVYKkp + return 0 + sleep 2 + run_mongos 'sh.shardCollection("app.city", { _id: 1 } )' clusterAdmin:clusterAdmin123456@some-name-mongos.data-sharded-16822 mongodb .svc.cluster.local '--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' + local 'command=sh.shardCollection("app.city", { _id: 1 } )' + local uri=clusterAdmin:clusterAdmin123456@some-name-mongos.data-sharded-16822 + local driver=mongodb + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ZFbG1MZIuD +++ 
mktemp ++ local LAST_ERR=/tmp/tmp.VNVADYz0pG ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.ZFbG1MZIuD ++ cat /tmp/tmp.VNVADYz0pG ++ rm /tmp/tmp.ZFbG1MZIuD /tmp/tmp.VNVADYz0pG ++ return 0 + local client_container=psmdb-client-5f578b7f94-fwqwf + local 'mongo_flag=--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' + kubectl_bin exec psmdb-client-5f578b7f94-fwqwf -- bash -c 'printf '\''sh.shardCollection("app.city", { _id: 1 } )\n'\'' | mongo mongodb://clusterAdmin:clusterAdmin123456@some-name-mongos.data-sharded-16822.svc.cluster.local/admin --tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' ++ mktemp + local LAST_OUT=/tmp/tmp.nWK2emaYSn ++ mktemp + local LAST_ERR=/tmp/tmp.LAs6whdIWc + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-5f578b7f94-fwqwf -- bash -c 'printf '\''sh.shardCollection("app.city", { _id: 1 } )\n'\'' | mongo mongodb://clusterAdmin:clusterAdmin123456@some-name-mongos.data-sharded-16822.svc.cluster.local/admin --tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.nWK2emaYSn Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-mongos.data-sharded-16822.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb {"t":{"$date":"2024-05-07T18:24:18.708Z"},"s":"I", "c":"NETWORK", "id":5490002, "ctx":"thread1","msg":"Started a new thread for the timer service"} Implicit session: session { "id" : UUID("66a2f1e6-66ca-4833-8e86-acae6099f564") } Percona Server for MongoDB server version: v7.0.8-5 WARNING: shell and server versions do not match { "collectionsharded" : "app.city", "ok" : 1, "$clusterTime" : { "clusterTime" : Timestamp(1715106259, 5), "signature" : { "hash" : BinData(0,"fIZonGLc/wZL6idvXzadncqq6MI="), "keyId" : NumberLong("7366322220668289040") } }, "operationTime" : Timestamp(1715106259, 5) } bye + cat /tmp/tmp.LAs6whdIWc + rm /tmp/tmp.nWK2emaYSn /tmp/tmp.LAs6whdIWc + return 0 + sleep 120 + desc 'check chunks' + set +o xtrace ----------------------------------------------------------------------------------- check chunks ----------------------------------------------------------------------------------- + chunks_param1=ns + chunks_param2='"app.city"' + [[ 7.0 != \4\.\4 ]] + chunks_param1=uuid ++ run_mongos 'use app\n db.getCollectionInfos({ "name": "city" })[0].info.uuid' user:pass@some-name-mongos.data-sharded-16822 '' '' '--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' ++ local 'command=use app\n db.getCollectionInfos({ "name": "city" })[0].info.uuid' ++ grep 'switched to db app' -A 1 ++ local uri=user:pass@some-name-mongos.data-sharded-16822 ++ local driver=mongodb ++ local suffix=.svc.cluster.local ++ grep -v 'switched to db app' +++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.3x0pci63WS ++++ mktemp +++ local LAST_ERR=/tmp/tmp.VkYlvM2Rih +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' 
+++ break +++ cat /tmp/tmp.3x0pci63WS +++ cat /tmp/tmp.VkYlvM2Rih +++ rm /tmp/tmp.3x0pci63WS /tmp/tmp.VkYlvM2Rih +++ return 0 ++ local client_container=psmdb-client-5f578b7f94-fwqwf ++ local 'mongo_flag=--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' ++ kubectl_bin exec psmdb-client-5f578b7f94-fwqwf -- bash -c 'printf '\''use app\n db.getCollectionInfos({ "name": "city" })[0].info.uuid\n'\'' | mongo mongodb://user:pass@some-name-mongos.data-sharded-16822.svc.cluster.local/admin --tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' +++ mktemp ++ local LAST_OUT=/tmp/tmp.uh4Q0LYDh5 +++ mktemp ++ local LAST_ERR=/tmp/tmp.jnlbllsIk9 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl exec psmdb-client-5f578b7f94-fwqwf -- bash -c 'printf '\''use app\n db.getCollectionInfos({ "name": "city" })[0].info.uuid\n'\'' | mongo mongodb://user:pass@some-name-mongos.data-sharded-16822.svc.cluster.local/admin --tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.uh4Q0LYDh5 ++ cat /tmp/tmp.jnlbllsIk9 ++ rm /tmp/tmp.uh4Q0LYDh5 /tmp/tmp.jnlbllsIk9 ++ return 0 + chunks_param2='UUID("ebdb8c16-8a86-4a4d-8458-5c8351ae3aed")' + shards=0 + for i in '"rs0"' '"rs1"' '"rs2"' ++ run_mongos 'use config\n db.chunks.count({"uuid": UUID("ebdb8c16-8a86-4a4d-8458-5c8351ae3aed"), "shard": "rs0"})' clusterAdmin:clusterAdmin123456@some-name-mongos.data-sharded-16822 mongodb .svc.cluster.local '--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' ++ grep 'switched to db config' -A 1 ++ local 'command=use config\n db.chunks.count({"uuid": UUID("ebdb8c16-8a86-4a4d-8458-5c8351ae3aed"), "shard": "rs0"})' ++ local uri=clusterAdmin:clusterAdmin123456@some-name-mongos.data-sharded-16822 ++ local driver=mongodb ++ local suffix=.svc.cluster.local ++ grep -v 'switched to db config' +++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.WpURJNtxem ++++ mktemp +++ local LAST_ERR=/tmp/tmp.WwDu79yu5V +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.WpURJNtxem +++ cat /tmp/tmp.WwDu79yu5V +++ rm /tmp/tmp.WpURJNtxem /tmp/tmp.WwDu79yu5V +++ return 0 ++ local client_container=psmdb-client-5f578b7f94-fwqwf ++ local 'mongo_flag=--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' ++ kubectl_bin exec psmdb-client-5f578b7f94-fwqwf -- bash -c 'printf '\''use config\n db.chunks.count({"uuid": UUID("ebdb8c16-8a86-4a4d-8458-5c8351ae3aed"), "shard": "rs0"})\n'\'' | mongo mongodb://clusterAdmin:clusterAdmin123456@some-name-mongos.data-sharded-16822.svc.cluster.local/admin --tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' +++ mktemp ++ local LAST_OUT=/tmp/tmp.IojTMqDXOQ +++ mktemp ++ local LAST_ERR=/tmp/tmp.Lz71ZocyYY ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl exec psmdb-client-5f578b7f94-fwqwf -- bash -c 'printf '\''use config\n db.chunks.count({"uuid": UUID("ebdb8c16-8a86-4a4d-8458-5c8351ae3aed"), "shard": "rs0"})\n'\'' | mongo 
mongodb://clusterAdmin:clusterAdmin123456@some-name-mongos.data-sharded-16822.svc.cluster.local/admin --tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.IojTMqDXOQ ++ cat /tmp/tmp.Lz71ZocyYY ++ rm /tmp/tmp.IojTMqDXOQ /tmp/tmp.Lz71ZocyYY ++ return 0 + out=1 + desc 'rs0 has 1 chunks' + set +o xtrace ----------------------------------------------------------------------------------- rs0 has 1 chunks ----------------------------------------------------------------------------------- + [[ 1 -ne 0 ]] + (( shards = shards + 1 )) + for i in '"rs0"' '"rs1"' '"rs2"' ++ run_mongos 'use config\n db.chunks.count({"uuid": UUID("ebdb8c16-8a86-4a4d-8458-5c8351ae3aed"), "shard": "rs1"})' clusterAdmin:clusterAdmin123456@some-name-mongos.data-sharded-16822 mongodb .svc.cluster.local '--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' ++ grep 'switched to db config' -A 1 ++ local 'command=use config\n db.chunks.count({"uuid": UUID("ebdb8c16-8a86-4a4d-8458-5c8351ae3aed"), "shard": "rs1"})' ++ local uri=clusterAdmin:clusterAdmin123456@some-name-mongos.data-sharded-16822 ++ local driver=mongodb ++ local suffix=.svc.cluster.local ++ grep -v 'switched to db config' +++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.D89k25w5ns ++++ mktemp +++ local LAST_ERR=/tmp/tmp.38sye743VH +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.D89k25w5ns +++ cat /tmp/tmp.38sye743VH +++ rm /tmp/tmp.D89k25w5ns /tmp/tmp.38sye743VH +++ return 0 ++ local client_container=psmdb-client-5f578b7f94-fwqwf ++ local 'mongo_flag=--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' ++ kubectl_bin exec psmdb-client-5f578b7f94-fwqwf -- bash -c 'printf '\''use config\n db.chunks.count({"uuid": UUID("ebdb8c16-8a86-4a4d-8458-5c8351ae3aed"), "shard": "rs1"})\n'\'' | mongo mongodb://clusterAdmin:clusterAdmin123456@some-name-mongos.data-sharded-16822.svc.cluster.local/admin --tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ZpACEr2Q5h +++ mktemp ++ local LAST_ERR=/tmp/tmp.HrmihQpCpM ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl exec psmdb-client-5f578b7f94-fwqwf -- bash -c 'printf '\''use config\n db.chunks.count({"uuid": UUID("ebdb8c16-8a86-4a4d-8458-5c8351ae3aed"), "shard": "rs1"})\n'\'' | mongo mongodb://clusterAdmin:clusterAdmin123456@some-name-mongos.data-sharded-16822.svc.cluster.local/admin --tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.ZpACEr2Q5h ++ cat /tmp/tmp.HrmihQpCpM ++ rm /tmp/tmp.ZpACEr2Q5h /tmp/tmp.HrmihQpCpM ++ return 0 + out=1 + desc 'rs1 has 1 chunks' + set +o xtrace ----------------------------------------------------------------------------------- rs1 has 1 chunks ----------------------------------------------------------------------------------- + [[ 1 -ne 0 ]] + (( shards = shards + 1 )) + for i in '"rs0"' '"rs1"' '"rs2"' ++ run_mongos 'use config\n db.chunks.count({"uuid": UUID("ebdb8c16-8a86-4a4d-8458-5c8351ae3aed"), "shard": "rs2"})' 
clusterAdmin:clusterAdmin123456@some-name-mongos.data-sharded-16822 mongodb .svc.cluster.local '--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' ++ grep 'switched to db config' -A 1 ++ local 'command=use config\n db.chunks.count({"uuid": UUID("ebdb8c16-8a86-4a4d-8458-5c8351ae3aed"), "shard": "rs2"})' ++ local uri=clusterAdmin:clusterAdmin123456@some-name-mongos.data-sharded-16822 ++ local driver=mongodb ++ grep -v 'switched to db config' ++ local suffix=.svc.cluster.local +++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.pq4eQ0Lecu ++++ mktemp +++ local LAST_ERR=/tmp/tmp.96H0XUFzJn +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.pq4eQ0Lecu +++ cat /tmp/tmp.96H0XUFzJn +++ rm /tmp/tmp.pq4eQ0Lecu /tmp/tmp.96H0XUFzJn +++ return 0 ++ local client_container=psmdb-client-5f578b7f94-fwqwf ++ local 'mongo_flag=--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' ++ kubectl_bin exec psmdb-client-5f578b7f94-fwqwf -- bash -c 'printf '\''use config\n db.chunks.count({"uuid": UUID("ebdb8c16-8a86-4a4d-8458-5c8351ae3aed"), "shard": "rs2"})\n'\'' | mongo mongodb://clusterAdmin:clusterAdmin123456@some-name-mongos.data-sharded-16822.svc.cluster.local/admin --tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' +++ mktemp ++ local LAST_OUT=/tmp/tmp.PzIVllWlaS +++ mktemp ++ local LAST_ERR=/tmp/tmp.0SUOkJzY5y ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl exec psmdb-client-5f578b7f94-fwqwf -- bash -c 'printf '\''use config\n db.chunks.count({"uuid": UUID("ebdb8c16-8a86-4a4d-8458-5c8351ae3aed"), "shard": "rs2"})\n'\'' | mongo mongodb://clusterAdmin:clusterAdmin123456@some-name-mongos.data-sharded-16822.svc.cluster.local/admin --tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.PzIVllWlaS ++ cat /tmp/tmp.0SUOkJzY5y ++ rm /tmp/tmp.PzIVllWlaS /tmp/tmp.0SUOkJzY5y ++ return 0 + out=1 + desc 'rs2 has 1 chunks' + set +o xtrace ----------------------------------------------------------------------------------- rs2 has 1 chunks ----------------------------------------------------------------------------------- + [[ 1 -ne 0 ]] + (( shards = shards + 1 )) + [[ 3 -lt 3 ]] ++ run_mongos 'use app\n db.dropDatabase()' clusterAdmin:clusterAdmin123456@some-name-mongos.data-sharded-16822 mongodb .svc.cluster.local '--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' ++ local 'command=use app\n db.dropDatabase()' ++ local uri=clusterAdmin:clusterAdmin123456@some-name-mongos.data-sharded-16822 ++ local driver=mongodb ++ local suffix=.svc.cluster.local +++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.gXRhBJntoy ++++ mktemp +++ local LAST_ERR=/tmp/tmp.CKSacusEHi +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.gXRhBJntoy +++ cat /tmp/tmp.CKSacusEHi +++ rm 
/tmp/tmp.gXRhBJntoy /tmp/tmp.CKSacusEHi +++ return 0 ++ local client_container=psmdb-client-5f578b7f94-fwqwf ++ local 'mongo_flag=--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' ++ kubectl_bin exec psmdb-client-5f578b7f94-fwqwf -- bash -c 'printf '\''use app\n db.dropDatabase()\n'\'' | mongo mongodb://clusterAdmin:clusterAdmin123456@some-name-mongos.data-sharded-16822.svc.cluster.local/admin --tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' +++ mktemp ++ local LAST_OUT=/tmp/tmp.q4asOTmSKd +++ mktemp ++ local LAST_ERR=/tmp/tmp.DbcvP3S6pv ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl exec psmdb-client-5f578b7f94-fwqwf -- bash -c 'printf '\''use app\n db.dropDatabase()\n'\'' | mongo mongodb://clusterAdmin:clusterAdmin123456@some-name-mongos.data-sharded-16822.svc.cluster.local/admin --tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.q4asOTmSKd ++ cat /tmp/tmp.DbcvP3S6pv ++ rm /tmp/tmp.q4asOTmSKd /tmp/tmp.DbcvP3S6pv ++ return 0 + res='Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-mongos.data-sharded-16822.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb {"t":{"$date":"2024-05-07T18:26:31.857Z"},"s":"I", "c":"NETWORK", "id":5490002, "ctx":"thread1","msg":"Started a new thread for the timer service"} Implicit session: session { "id" : UUID("626f31f8-34d2-4e33-875a-06d3bc2c7097") } Percona Server for MongoDB server version: v7.0.8-5 WARNING: shell and server versions do not match switched to db app { "ok" : 1, "$clusterTime" : { "clusterTime" : Timestamp(1715106392, 43), "signature" : { "hash" : BinData(0,"U+RQO+x/703kG7NVlD/Oq/tFXQg="), "keyId" : NumberLong("7366322220668289040") } }, "operationTime" : Timestamp(1715106392, 43) } bye' + echo Percona Server for MongoDB shell version v4.4.29-28 connecting to: 'mongodb://some-name-mongos.data-sharded-16822.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb' '{"t":{"$date":"2024-05-07T18:26:31.857Z"},"s":"I",' '"c":"NETWORK",' '"id":5490002,' '"ctx":"thread1","msg":"Started' a new thread for the timer 'service"}' Implicit session: session '{' '"id"' : 'UUID("626f31f8-34d2-4e33-875a-06d3bc2c7097")' '}' Percona Server for MongoDB server version: v7.0.8-5 WARNING: shell and server versions do not match switched to db app '{' '"ok"' : 1, '"$clusterTime"' : '{' '"clusterTime"' : 'Timestamp(1715106392,' '43),' '"signature"' : '{' '"hash"' : 'BinData(0,"U+RQO+x/703kG7NVlD/Oq/tFXQg="),' '"keyId"' : 'NumberLong("7366322220668289040")' '}' '},' '"operationTime"' : 'Timestamp(1715106392,' '43)' '}' bye + grep -q '"ok" : 1' + desc 'check if rs1 and all its related stateful sets are properly removed' + set +o xtrace ----------------------------------------------------------------------------------- check if rs1 and all its related stateful sets are properly removed ----------------------------------------------------------------------------------- + check_rs_proper_component_deletion some-name rs1 + local cluster=some-name + local rs_name=rs1 ++ kubectl_bin get psmdb some-name -ojson ++ jq --arg RS rs1 '.spec.replsets | map(.name == $RS) | index(true)' +++ mktemp ++ local LAST_OUT=/tmp/tmp.4AOiTeEYIl +++ mktemp ++ local LAST_ERR=/tmp/tmp.cQptOEcAOx ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl 
get psmdb some-name -ojson ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.4AOiTeEYIl ++ cat /tmp/tmp.cQptOEcAOx ++ rm /tmp/tmp.4AOiTeEYIl /tmp/tmp.cQptOEcAOx ++ return 0 + rs_idx=1 + kubectl_bin patch psmdb some-name --type=json '-p=[{'\''op'\'': '\''remove'\'', '\''path'\'': '\''/spec/replsets/1'\''}]' ++ mktemp + local LAST_OUT=/tmp/tmp.8u8PlxQ4cB ++ mktemp + local LAST_ERR=/tmp/tmp.YuYc6oiR4p + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl patch psmdb some-name --type=json '-p=[{'\''op'\'': '\''remove'\'', '\''path'\'': '\''/spec/replsets/1'\''}]' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.8u8PlxQ4cB perconaservermongodb.psmdb.percona.com/some-name patched + cat /tmp/tmp.YuYc6oiR4p + rm /tmp/tmp.8u8PlxQ4cB /tmp/tmp.YuYc6oiR4p + return 0 + echo -n 'Deleting replset rs1' Deleting replset rs1++ jq '.items | length' ++ kubectl_bin get sts -l app.kubernetes.io/instance=some-name,app.kubernetes.io/replset=rs1 -ojson +++ mktemp ++ local LAST_OUT=/tmp/tmp.gL5Tjyd4JY +++ mktemp ++ local LAST_ERR=/tmp/tmp.OoeChbryj9 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get sts -l app.kubernetes.io/instance=some-name,app.kubernetes.io/replset=rs1 -ojson ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.gL5Tjyd4JY ++ cat /tmp/tmp.OoeChbryj9 ++ rm /tmp/tmp.gL5Tjyd4JY /tmp/tmp.OoeChbryj9 ++ return 0 + [[ 2 -eq 0 ]] + let retry+=1 + '[' 1 -ge 70 ']' + echo -n . .+ sleep 30 ++ kubectl_bin get sts -l app.kubernetes.io/instance=some-name,app.kubernetes.io/replset=rs1 -ojson ++ jq '.items | length' +++ mktemp ++ local LAST_OUT=/tmp/tmp.IdgFJwOka6 +++ mktemp ++ local LAST_ERR=/tmp/tmp.1QmOSxdgee ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get sts -l app.kubernetes.io/instance=some-name,app.kubernetes.io/replset=rs1 -ojson ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.IdgFJwOka6 ++ cat /tmp/tmp.1QmOSxdgee ++ rm /tmp/tmp.IdgFJwOka6 /tmp/tmp.1QmOSxdgee ++ return 0 + [[ 0 -eq 0 ]] + echo OK OK + desc 'check if rs2 and all its related stateful sets are properly removed' + set +o xtrace ----------------------------------------------------------------------------------- check if rs2 and all its related stateful sets are properly removed ----------------------------------------------------------------------------------- + check_rs_proper_component_deletion some-name rs2 + local cluster=some-name + local rs_name=rs2 ++ jq --arg RS rs2 '.spec.replsets | map(.name == $RS) | index(true)' ++ kubectl_bin get psmdb some-name -ojson +++ mktemp ++ local LAST_OUT=/tmp/tmp.ZFAKNyfZQj +++ mktemp ++ local LAST_ERR=/tmp/tmp.tW6EP6XKPH ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -ojson ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.ZFAKNyfZQj ++ cat /tmp/tmp.tW6EP6XKPH ++ rm /tmp/tmp.ZFAKNyfZQj /tmp/tmp.tW6EP6XKPH ++ return 0 + rs_idx=1 + kubectl_bin patch psmdb some-name --type=json '-p=[{'\''op'\'': '\''remove'\'', '\''path'\'': '\''/spec/replsets/1'\''}]' ++ mktemp + local LAST_OUT=/tmp/tmp.lYY0h1Xe25 ++ mktemp + local LAST_ERR=/tmp/tmp.kET3XzMIfH + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl patch psmdb some-name --type=json '-p=[{'\''op'\'': '\''remove'\'', '\''path'\'': '\''/spec/replsets/1'\''}]' 
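The rs1 check above removes the replset from the PerconaServerMongoDB resource with a JSON patch and then polls until its statefulsets are gone; the same routine runs next for rs2, and because rs1 has already been dropped, rs2 now also sits at index 1 of .spec.replsets, which is why the identical /spec/replsets/1 path is patched again. A sketch of that routine under the same cluster name, labels, and retry limits as the harness (the function name remove_replset is illustrative; the harness's own helper is check_rs_proper_component_deletion):

    remove_replset() {
      local cluster=$1 rs=$2
      # Locate the replset's index inside .spec.replsets, then remove that entry.
      local idx
      idx=$(kubectl get psmdb "$cluster" -o json |
            jq --arg RS "$rs" '.spec.replsets | map(.name == $RS) | index(true)')
      kubectl patch psmdb "$cluster" --type=json \
        -p="[{\"op\": \"remove\", \"path\": \"/spec/replsets/${idx}\"}]"

      # Wait (up to 70 x 30 s, as in the harness) for the operator to delete the
      # replset's statefulsets.
      local retry=0
      until [ "$(kubectl get sts \
                   -l "app.kubernetes.io/instance=${cluster},app.kubernetes.io/replset=${rs}" \
                   -o json | jq '.items | length')" -eq 0 ]; do
        retry=$((retry + 1))
        if [ "$retry" -ge 70 ]; then
          echo "timed out waiting for ${rs} statefulsets to be deleted" >&2
          return 1
        fi
        sleep 30
      done
    }

    remove_replset some-name rs1
    remove_replset some-name rs2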
+ exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.lYY0h1Xe25 perconaservermongodb.psmdb.percona.com/some-name patched + cat /tmp/tmp.kET3XzMIfH + rm /tmp/tmp.lYY0h1Xe25 /tmp/tmp.kET3XzMIfH + return 0 + echo -n 'Deleting replset rs2' Deleting replset rs2++ kubectl_bin get sts -l app.kubernetes.io/instance=some-name,app.kubernetes.io/replset=rs2 -ojson +++ mktemp ++ local LAST_OUT=/tmp/tmp.zUYI0kKj1w +++ mktemp ++ jq '.items | length' ++ local LAST_ERR=/tmp/tmp.zbYrcSHQvZ ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get sts -l app.kubernetes.io/instance=some-name,app.kubernetes.io/replset=rs2 -ojson ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.zUYI0kKj1w ++ cat /tmp/tmp.zbYrcSHQvZ ++ rm /tmp/tmp.zUYI0kKj1w /tmp/tmp.zbYrcSHQvZ ++ return 0 + [[ 2 -eq 0 ]] + let retry+=1 + '[' 2 -ge 70 ']' + echo -n . .+ sleep 30 ++ kubectl_bin get sts -l app.kubernetes.io/instance=some-name,app.kubernetes.io/replset=rs2 -ojson ++ jq '.items | length' +++ mktemp ++ local LAST_OUT=/tmp/tmp.saPGJuRnfG +++ mktemp ++ local LAST_ERR=/tmp/tmp.y9MtiIKT44 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get sts -l app.kubernetes.io/instance=some-name,app.kubernetes.io/replset=rs2 -ojson ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.saPGJuRnfG ++ cat /tmp/tmp.y9MtiIKT44 ++ rm /tmp/tmp.saPGJuRnfG /tmp/tmp.y9MtiIKT44 ++ return 0 + [[ 2 -eq 0 ]] + let retry+=1 + '[' 3 -ge 70 ']' + echo -n . .+ sleep 30 ++ jq '.items | length' ++ kubectl_bin get sts -l app.kubernetes.io/instance=some-name,app.kubernetes.io/replset=rs2 -ojson +++ mktemp ++ local LAST_OUT=/tmp/tmp.rLKyMq4b24 +++ mktemp ++ local LAST_ERR=/tmp/tmp.bITnUz8V9o ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get sts -l app.kubernetes.io/instance=some-name,app.kubernetes.io/replset=rs2 -ojson ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.rLKyMq4b24 ++ cat /tmp/tmp.bITnUz8V9o ++ rm /tmp/tmp.rLKyMq4b24 /tmp/tmp.bITnUz8V9o ++ return 0 + [[ 2 -eq 0 ]] + let retry+=1 + '[' 4 -ge 70 ']' + echo -n . .+ sleep 30 ++ kubectl_bin get sts -l app.kubernetes.io/instance=some-name,app.kubernetes.io/replset=rs2 -ojson ++ jq '.items | length' +++ mktemp ++ local LAST_OUT=/tmp/tmp.nW2BtofoAm +++ mktemp ++ local LAST_ERR=/tmp/tmp.e0TCKm0iF5 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get sts -l app.kubernetes.io/instance=some-name,app.kubernetes.io/replset=rs2 -ojson ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.nW2BtofoAm ++ cat /tmp/tmp.e0TCKm0iF5 ++ rm /tmp/tmp.nW2BtofoAm /tmp/tmp.e0TCKm0iF5 ++ return 0 + [[ 2 -eq 0 ]] + let retry+=1 + '[' 5 -ge 70 ']' + echo -n . 
.+ sleep 30 ++ kubectl_bin get sts -l app.kubernetes.io/instance=some-name,app.kubernetes.io/replset=rs2 -ojson ++ jq '.items | length' +++ mktemp ++ local LAST_OUT=/tmp/tmp.7gathjFbHb +++ mktemp ++ local LAST_ERR=/tmp/tmp.ZFkpAlnR6O ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get sts -l app.kubernetes.io/instance=some-name,app.kubernetes.io/replset=rs2 -ojson ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.7gathjFbHb ++ cat /tmp/tmp.ZFkpAlnR6O ++ rm /tmp/tmp.7gathjFbHb /tmp/tmp.ZFkpAlnR6O ++ return 0 + [[ 0 -eq 0 ]] + echo OK OK + destroy data-sharded-16822 + local namespace=data-sharded-16822 + local ignore_logs=true + desc 'destroy cluster/operator and all other resources' + set +o xtrace ----------------------------------------------------------------------------------- destroy cluster/operator and all other resources ----------------------------------------------------------------------------------- + '[' true == false ']' + delete_crd + desc 'get and delete old CRDs and RBAC' + set +o xtrace ----------------------------------------------------------------------------------- get and delete old CRDs and RBAC ----------------------------------------------------------------------------------- + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1545/deploy/crd.yaml --ignore-not-found --wait=false ++ mktemp + local LAST_OUT=/tmp/tmp.39BUDetEcc ++ mktemp + local LAST_ERR=/tmp/tmp.dzzVAVoYiN + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1545/deploy/crd.yaml --ignore-not-found --wait=false + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.39BUDetEcc customresourcedefinition.apiextensions.k8s.io "perconaservermongodbbackups.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbrestores.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbs.psmdb.percona.com" deleted + cat /tmp/tmp.dzzVAVoYiN + rm /tmp/tmp.39BUDetEcc /tmp/tmp.dzzVAVoYiN + return 0 ++ grep -v '\-\-\-' ++ yq eval .metadata.name /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1545/deploy/crd.yaml + for crd_name in '$(yq eval '\''.metadata.name'\'' "${src_dir}/deploy/crd.yaml" | grep -v '\''\-\-\-'\'')' + kubectl get perconaservermongodbbackups.psmdb.percona.com --all-namespaces -o wide + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbbackups.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' + grep -v NAMESPACE error: the server doesn't have a resource type "perconaservermongodbbackups" + kubectl patch perconaservermongodbbackups.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbbackups" + : + kubectl_bin wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.wdZxQrcojD ++ mktemp + local LAST_ERR=/tmp/tmp.T9vYGDbq3v + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.wdZxQrcojD + cat /tmp/tmp.T9vYGDbq3v + rm /tmp/tmp.wdZxQrcojD /tmp/tmp.T9vYGDbq3v + return 0 + for crd_name in '$(yq eval '\''.metadata.name'\'' "${src_dir}/deploy/crd.yaml" | grep -v '\''\-\-\-'\'')' + 
kubectl get perconaservermongodbrestores.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbrestores.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbrestores" + kubectl patch perconaservermongodbrestores.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbrestores" + : + kubectl_bin wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.ciWygQCyvJ ++ mktemp + local LAST_ERR=/tmp/tmp.8q6KCspgW4 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.ciWygQCyvJ + cat /tmp/tmp.8q6KCspgW4 + rm /tmp/tmp.ciWygQCyvJ /tmp/tmp.8q6KCspgW4 + return 0 + for crd_name in '$(yq eval '\''.metadata.name'\'' "${src_dir}/deploy/crd.yaml" | grep -v '\''\-\-\-'\'')' + kubectl get perconaservermongodbs.psmdb.percona.com --all-namespaces -o wide + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbs.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' + grep -v NAMESPACE error: the server doesn't have a resource type "perconaservermongodbs" + kubectl patch perconaservermongodbs.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbs" + : + kubectl_bin wait --for=delete crd perconaservermongodbs.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.mGKNaYpUcE ++ mktemp + local LAST_ERR=/tmp/tmp.KRFNIOzkqK + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=delete crd perconaservermongodbs.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.mGKNaYpUcE + cat /tmp/tmp.KRFNIOzkqK + rm /tmp/tmp.mGKNaYpUcE /tmp/tmp.KRFNIOzkqK + return 0 + local rbac_yaml=rbac.yaml + '[' -n psmdb-operator ']' + rbac_yaml=cw-rbac.yaml + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1545/deploy/cw-rbac.yaml --ignore-not-found ++ mktemp + local LAST_OUT=/tmp/tmp.vuDe3DpK8R ++ mktemp + local LAST_ERR=/tmp/tmp.iyizTd8PXb + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1545/deploy/cw-rbac.yaml --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.vuDe3DpK8R clusterrole.rbac.authorization.k8s.io "percona-server-mongodb-operator" deleted clusterrolebinding.rbac.authorization.k8s.io "service-account-percona-server-mongodb-operator" deleted + cat /tmp/tmp.iyizTd8PXb + rm /tmp/tmp.vuDe3DpK8R /tmp/tmp.iyizTd8PXb + return 0 + kubectl_bin delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.12.4/cert-manager.yaml namespace "cert-manager" deleted customresourcedefinition.apiextensions.k8s.io "certificaterequests.cert-manager.io" deleted customresourcedefinition.apiextensions.k8s.io "certificates.cert-manager.io" deleted customresourcedefinition.apiextensions.k8s.io "challenges.acme.cert-manager.io" deleted customresourcedefinition.apiextensions.k8s.io "clusterissuers.cert-manager.io" deleted customresourcedefinition.apiextensions.k8s.io "issuers.cert-manager.io" deleted 
customresourcedefinition.apiextensions.k8s.io "orders.acme.cert-manager.io" deleted serviceaccount "cert-manager-cainjector" deleted serviceaccount "cert-manager" deleted serviceaccount "cert-manager-webhook" deleted configmap "cert-manager-webhook" deleted clusterrole.rbac.authorization.k8s.io "cert-manager-cainjector" deleted clusterrole.rbac.authorization.k8s.io "cert-manager-controller-issuers" deleted clusterrole.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" deleted clusterrole.rbac.authorization.k8s.io "cert-manager-controller-certificates" deleted clusterrole.rbac.authorization.k8s.io "cert-manager-controller-orders" deleted clusterrole.rbac.authorization.k8s.io "cert-manager-controller-challenges" deleted clusterrole.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" deleted clusterrole.rbac.authorization.k8s.io "cert-manager-view" deleted clusterrole.rbac.authorization.k8s.io "cert-manager-edit" deleted clusterrole.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" deleted clusterrole.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" deleted clusterrole.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" deleted clusterrolebinding.rbac.authorization.k8s.io "cert-manager-cainjector" deleted clusterrolebinding.rbac.authorization.k8s.io "cert-manager-controller-issuers" deleted clusterrolebinding.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" deleted clusterrolebinding.rbac.authorization.k8s.io "cert-manager-controller-certificates" deleted clusterrolebinding.rbac.authorization.k8s.io "cert-manager-controller-orders" deleted clusterrolebinding.rbac.authorization.k8s.io "cert-manager-controller-challenges" deleted clusterrolebinding.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" deleted clusterrolebinding.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" deleted clusterrolebinding.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" deleted clusterrolebinding.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" deleted role.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" deleted role.rbac.authorization.k8s.io "cert-manager:leaderelection" deleted rolebinding.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" deleted rolebinding.rbac.authorization.k8s.io "cert-manager:leaderelection" deleted mutatingwebhookconfiguration.admissionregistration.k8s.io "cert-manager-webhook" deleted validatingwebhookconfiguration.admissionregistration.k8s.io "cert-manager-webhook" deleted namespace "cert-manager" deleted + : + '[' -n '' ']' + '[' -n psmdb-operator ']' + kubectl_bin delete --grace-period=0 --force=true namespace data-sharded-16822 + rm -rf /tmp/tmp.hEqc3ybdu2 + kubectl_bin delete --grace-period=0 --force=true namespace psmdb-operator ++ mktemp ++ mktemp + local LAST_OUT=/tmp/tmp.FU9xjB3kQ6 + desc 'test passed' + set +o xtrace ----------------------------------------------------------------------------------- test passed ----------------------------------------------------------------------------------- ++ mktemp + local LAST_OUT=/tmp/tmp.t59T4AuavT + local LAST_ERR=/tmp/tmp.ekR1p5If10 + local exit_status=0 + local timeout=4 ++ mktemp + local LAST_ERR=/tmp/tmp.vcdFbWr4mM + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete --grace-period=0 --force=true namespace data-sharded-16822 ++ seq 0 2 + for i 
in '$(seq 0 2)' + set +e + kubectl delete --grace-period=0 --force=true namespace psmdb-operator
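The destroy step above repeats the CRD and RBAC cleanup from the start of the run, tears down cert-manager v1.12.4, and force-deletes both the test namespace and psmdb-operator; the captured trace ends while that last namespace deletion is still in flight, after the "test passed" banner has already been printed. A condensed sketch of the teardown (not part of the captured trace), assuming src_dir points at the operator checkout used by this job:

    # Remove the operator CRDs; strip finalizers from any leftover custom resources
    # so deletion can complete (the "doesn't have a resource type" errors above just
    # mean nothing was left behind).
    kubectl delete -f "${src_dir}/deploy/crd.yaml" --ignore-not-found --wait=false
    for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v -- '---'); do
      kubectl get "$crd_name" --all-namespaces -o wide | grep -v NAMESPACE |
        xargs -L 1 sh -xc "kubectl patch $crd_name -n \$0 \$1 --type=merge -p '{\"metadata\":{\"finalizers\":[]}}'" || true
      kubectl wait --for=delete crd "$crd_name"
    done

    # Cluster-wide RBAC, cert-manager, and the namespaces created for this run.
    kubectl delete -f "${src_dir}/deploy/cw-rbac.yaml" --ignore-not-found
    kubectl delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.12.4/cert-manager.yaml || true
    kubectl delete --grace-period=0 --force=true namespace data-sharded-16822
    kubectl delete --grace-period=0 --force=true namespace psmdb-operator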