Log: /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1352/e2e-tests/logs/demand-backup-physical-sharded.log + create_infra demand-backup-physical-sharded-6860 + local ns=demand-backup-physical-sharded-6860 + delete_crd + desc 'get and delete old CRDs and RBAC' + set +o xtrace ----------------------------------------------------------------------------------- get and delete old CRDs and RBAC ----------------------------------------------------------------------------------- + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1352/deploy/crd.yaml --ignore-not-found --wait=false ++ mktemp + local LAST_OUT=/tmp/tmp.6VfLcE51au ++ mktemp + local LAST_ERR=/tmp/tmp.sDXV6X0DJ4 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1352/deploy/crd.yaml --ignore-not-found --wait=false + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.6VfLcE51au customresourcedefinition.apiextensions.k8s.io "perconaservermongodbbackups.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbrestores.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbs.psmdb.percona.com" deleted + cat /tmp/tmp.sDXV6X0DJ4 + rm /tmp/tmp.6VfLcE51au /tmp/tmp.sDXV6X0DJ4 + return 0 ++ grep -v '\-\-\-' ++ yq eval .metadata.name /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1352/deploy/crd.yaml + for crd_name in '$(yq eval '\''.metadata.name'\'' "${src_dir}/deploy/crd.yaml" | grep -v '\''\-\-\-'\'')' + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbbackups.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' + kubectl get perconaservermongodbbackups.psmdb.percona.com --all-namespaces -o wide Error from server (NotFound): Unable to list "psmdb.percona.com/v1, Resource=perconaservermongodbbackups": the server could not find the requested resource (get perconaservermongodbbackups.psmdb.percona.com) + kubectl patch perconaservermongodbbackups.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbbackups" + : + kubectl_bin wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.S1eOfEUNvI ++ mktemp + local LAST_ERR=/tmp/tmp.tncFEm0pyU + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.S1eOfEUNvI + cat /tmp/tmp.tncFEm0pyU + rm /tmp/tmp.S1eOfEUNvI /tmp/tmp.tncFEm0pyU + return 0 + for crd_name in '$(yq eval '\''.metadata.name'\'' "${src_dir}/deploy/crd.yaml" | grep -v '\''\-\-\-'\'')' + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbrestores.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' + kubectl get perconaservermongodbrestores.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE error: the server doesn't have a resource type "perconaservermongodbrestores" + kubectl patch perconaservermongodbrestores.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbrestores" + : + kubectl_bin wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.zvbwXwqgK1 ++ mktemp + local 
LAST_ERR=/tmp/tmp.1JBWzpmk9S + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.zvbwXwqgK1 + cat /tmp/tmp.1JBWzpmk9S + rm /tmp/tmp.zvbwXwqgK1 /tmp/tmp.1JBWzpmk9S + return 0 + for crd_name in '$(yq eval '\''.metadata.name'\'' "${src_dir}/deploy/crd.yaml" | grep -v '\''\-\-\-'\'')' + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbs.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' + kubectl get perconaservermongodbs.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE error: the server doesn't have a resource type "perconaservermongodbs" + kubectl patch perconaservermongodbs.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbs" + : + kubectl_bin wait --for=delete crd perconaservermongodbs.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.7JQYQQnd7y ++ mktemp + local LAST_ERR=/tmp/tmp.QA28qtZlaN + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=delete crd perconaservermongodbs.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.7JQYQQnd7y + cat /tmp/tmp.QA28qtZlaN + rm /tmp/tmp.7JQYQQnd7y /tmp/tmp.QA28qtZlaN + return 0 + local rbac_yaml=rbac.yaml + '[' -n '' ']' + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1352/deploy/rbac.yaml --ignore-not-found ++ mktemp + local LAST_OUT=/tmp/tmp.OYJNvs1KdL ++ mktemp + local LAST_ERR=/tmp/tmp.3KtDfBwFiX + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1352/deploy/rbac.yaml --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.OYJNvs1KdL role.rbac.authorization.k8s.io "percona-server-mongodb-operator" deleted serviceaccount "percona-server-mongodb-operator" deleted rolebinding.rbac.authorization.k8s.io "service-account-percona-server-mongodb-operator" deleted + cat /tmp/tmp.3KtDfBwFiX + rm /tmp/tmp.OYJNvs1KdL /tmp/tmp.3KtDfBwFiX + return 0 + check_crd_for_deletion PR-1352-9793733e + local git_tag=PR-1352-9793733e ++ curl -s https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/PR-1352-9793733e/deploy/crd.yaml ++ /usr/bin/sed ':a;N;$!ba;s/\n/ /g' ++ /usr/bin/sed s/---//g ++ yq eval .metadata.name + for crd_name in '$(curl -s https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/${git_tag}/deploy/crd.yaml | yq eval '\''.metadata.name'\'' | $sed '\''s/---//g'\'' | $sed '\'':a;N;$!ba;s/\n/ /g'\'')' ++ kubectl_bin get crd/null -o 'jsonpath={.status.conditions[-1].type}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.YOKXaYLo9l +++ mktemp ++ local LAST_ERR=/tmp/tmp.mPQRX7JHAI ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}' ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.YOKXaYLo9l ++ cat /tmp/tmp.mPQRX7JHAI Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ sleep 0 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}' ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.YOKXaYLo9l ++ cat /tmp/tmp.mPQRX7JHAI Error 
from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ sleep 4 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}' ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.YOKXaYLo9l ++ cat /tmp/tmp.mPQRX7JHAI Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ sleep 8 ++ cat /tmp/tmp.YOKXaYLo9l ++ cat /tmp/tmp.mPQRX7JHAI Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ rm /tmp/tmp.YOKXaYLo9l /tmp/tmp.mPQRX7JHAI ++ return 1 + [[ '' == \T\e\r\m\i\n\a\t\i\n\g ]] + '[' -n '' ']' + create_namespace demand-backup-physical-sharded-6860 + local namespace=demand-backup-physical-sharded-6860 + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + destroy_chaos_mesh ++ helm list --all-namespaces --filter chaos-mesh ++ awk '-F ' '{print $2}' ++ tail -n1 ++ sed s/NAMESPACE// + local chaos_mesh_ns= + desc 'destroy chaos-mesh' + set +o xtrace ----------------------------------------------------------------------------------- destroy chaos-mesh ----------------------------------------------------------------------------------- ++ awk '{print $1}' ++ grep chaos-mesh ++ kubectl api-resources + '[' -n '' ']' ++ grep chaos-mesh.org ++ awk '{print $1}' ++ kubectl get crd + timeout 30 kubectl delete crd error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrolebinding ++ awk '{print $1}' ++ grep chaos-mesh + timeout 30 kubectl delete clusterrolebinding error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrole ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrole error: resource(s) were provided, but no name was specified + : ++ kubectl get MutatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete MutatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ grep chaos-mesh ++ kubectl get ValidatingWebhookConfiguration ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep validate-auth ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : + desc 'cleaned up all old namespaces' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up all old namespaces ----------------------------------------------------------------------------------- + '[' -n '' ']' + desc 'cleaned up old namespaces demand-backup-physical-sharded-6860' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up old namespaces demand-backup-physical-sharded-6860 ----------------------------------------------------------------------------------- + kubectl_bin delete namespace demand-backup-physical-sharded-6860 --ignore-not-found + awk '{print$1}' + egrep -v '^kube-|^default|Terminating|psmdb-operator|openshift|gke-mcs|^NAME' + xargs kubectl delete ns ++ mktemp + kubectl_bin get ns + local LAST_OUT=/tmp/tmp.2PS5aJAPlJ ++ mktemp + local LAST_OUT=/tmp/tmp.rDYajIqjgw ++ mktemp ++ mktemp + local LAST_ERR=/tmp/tmp.3TuVa6ENVu + local exit_status=0 + local timeout=4 + local LAST_ERR=/tmp/tmp.G4KXWrb0to + local exit_status=0 + local timeout=4 ++ seq 0 2 ++ seq 0 2 + for 
i in '$(seq 0 2)' + set +e + kubectl delete namespace demand-backup-physical-sharded-6860 --ignore-not-found + for i in '$(seq 0 2)' + set +e + kubectl get ns + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.2PS5aJAPlJ + cat /tmp/tmp.3TuVa6ENVu + rm /tmp/tmp.2PS5aJAPlJ /tmp/tmp.3TuVa6ENVu + return 0 + kubectl_bin wait --for=delete namespace demand-backup-physical-sharded-6860 ++ mktemp + local LAST_OUT=/tmp/tmp.8fpvLlmq96 ++ mktemp + local LAST_ERR=/tmp/tmp.vn7q4Ofzdr + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=delete namespace demand-backup-physical-sharded-6860 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.rDYajIqjgw + cat /tmp/tmp.G4KXWrb0to + rm /tmp/tmp.rDYajIqjgw /tmp/tmp.G4KXWrb0to + return 0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.8fpvLlmq96 + cat /tmp/tmp.vn7q4Ofzdr + rm /tmp/tmp.8fpvLlmq96 /tmp/tmp.vn7q4Ofzdr + return 0 + desc 'create namespace demand-backup-physical-sharded-6860' + set +o xtrace ----------------------------------------------------------------------------------- create namespace demand-backup-physical-sharded-6860 ----------------------------------------------------------------------------------- + kubectl_bin create namespace demand-backup-physical-sharded-6860 ++ mktemp + local LAST_OUT=/tmp/tmp.H1KnEUZfy5 ++ mktemp + local LAST_ERR=/tmp/tmp.RM7YGowNKn + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl create namespace demand-backup-physical-sharded-6860 namespace "demand-backup-physical-sharded-20708" deleted + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.H1KnEUZfy5 namespace/demand-backup-physical-sharded-6860 created + cat /tmp/tmp.RM7YGowNKn + rm /tmp/tmp.H1KnEUZfy5 /tmp/tmp.RM7YGowNKn + return 0 ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.XdyO9Ya2Dg +++ mktemp ++ local LAST_ERR=/tmp/tmp.kCZ25HTkHA ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.XdyO9Ya2Dg ++ cat /tmp/tmp.kCZ25HTkHA ++ rm /tmp/tmp.XdyO9Ya2Dg /tmp/tmp.kCZ25HTkHA ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-1352-9793733e-6-cluster1 --namespace=demand-backup-physical-sharded-6860 ++ mktemp + local LAST_OUT=/tmp/tmp.Dq4WbvhHN1 ++ mktemp + local LAST_ERR=/tmp/tmp.dj4qQf02qn + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-1352-9793733e-6-cluster1 --namespace=demand-backup-physical-sharded-6860 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Dq4WbvhHN1 Context "gke_cloud-dev-112233_us-central1-a_jen-psmdb-1352-9793733e-6-cluster1" modified. 
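
Every kubectl call in this trace runs through the same wrapper, whose shape is fully visible above: stdout and stderr are captured into mktemp files, the command is retried up to three times with a growing back-off (the `sleep 0`, `sleep 4`, `sleep 8` lines when the `crd/null` lookup keeps failing), and both buffers are printed before the temp files are removed. A minimal sketch reconstructed from the xtrace follows; the real helper lives in e2e-tests/functions, so the exact body is an assumption.

    # Reconstruction of the kubectl_bin wrapper seen throughout this log.
    # Details are inferred from the trace, not copied from the source tree.
    kubectl_bin() {
        local LAST_OUT LAST_ERR
        local exit_status=0
        local timeout=4
        LAST_OUT=$(mktemp)
        LAST_ERR=$(mktemp)
        for i in $(seq 0 2); do
            set +e
            kubectl "$@" >"$LAST_OUT" 2>"$LAST_ERR"
            exit_status=$?
            set -e
            if [ "$exit_status" != 0 ]; then
                # failed attempt: show what happened, then back off 0s/4s/8s
                cat "$LAST_OUT"
                cat "$LAST_ERR"
                sleep $((timeout * i))
            else
                break
            fi
        done
        cat "$LAST_OUT"
        cat "$LAST_ERR"
        rm "$LAST_OUT" "$LAST_ERR"
        return $exit_status
    }

This also explains why failed lookups in the trace print the same NotFound error several times: once per attempt and once in the final flush.
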
+ cat /tmp/tmp.dj4qQf02qn + rm /tmp/tmp.Dq4WbvhHN1 /tmp/tmp.dj4qQf02qn + return 0 + deploy_operator + desc 'start PSMDB operator' + set +o xtrace ----------------------------------------------------------------------------------- start PSMDB operator ----------------------------------------------------------------------------------- + local cr_file + '[' -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1352/e2e-tests/demand-backup-physical-sharded/conf/crd.yaml ']' + cr_file=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1352/deploy/crd.yaml + kubectl_bin apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1352/deploy/crd.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.U4WoVepExk ++ mktemp + local LAST_ERR=/tmp/tmp.H4UCtndu1A + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1352/deploy/crd.yaml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.U4WoVepExk customresourcedefinition.apiextensions.k8s.io/perconaservermongodbbackups.psmdb.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaservermongodbrestores.psmdb.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaservermongodbs.psmdb.percona.com serverside-applied + cat /tmp/tmp.H4UCtndu1A + rm /tmp/tmp.U4WoVepExk /tmp/tmp.H4UCtndu1A + return 0 + '[' -n '' ']' + apply_rbac rbac + local operator_namespace=psmdb-operator + local rbac=rbac + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1352/deploy/rbac.yaml + sed -e 's^namespace: .*^namespace: psmdb-operator^' + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.0rxc7pMSol ++ mktemp + local LAST_ERR=/tmp/tmp.0wAIEopAHz + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.0rxc7pMSol role.rbac.authorization.k8s.io/percona-server-mongodb-operator created serviceaccount/percona-server-mongodb-operator created rolebinding.rbac.authorization.k8s.io/service-account-percona-server-mongodb-operator created + cat /tmp/tmp.0wAIEopAHz + rm /tmp/tmp.0rxc7pMSol /tmp/tmp.0wAIEopAHz + return 0 + kubectl_bin apply -f - + yq eval ' (.spec.template.spec.containers[].image = "perconalab/percona-server-mongodb-operator:PR-1352-9793733e") | ((.. | select(.[] == "DISABLE_TELEMETRY")) |= .value="true") | ((.. 
| select(.[] == "LOG_LEVEL")) |= .value="DEBUG")' /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1352/deploy/operator.yaml
++ mktemp
+ local LAST_OUT=/tmp/tmp.hX5fkjOI3I
++ mktemp
+ local LAST_ERR=/tmp/tmp.HICvSREuCn
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl apply -f -
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.hX5fkjOI3I
deployment.apps/percona-server-mongodb-operator created
+ cat /tmp/tmp.HICvSREuCn
+ rm /tmp/tmp.hX5fkjOI3I /tmp/tmp.HICvSREuCn
+ return 0
+ sleep 2
++ get_operator_pod
++ kubectl_bin get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}'
+++ mktemp
++ local LAST_OUT=/tmp/tmp.wdA2oRqtHR
+++ mktemp
++ local LAST_ERR=/tmp/tmp.WqXqbUBvPO
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in '$(seq 0 2)'
++ set +e
++ kubectl get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}'
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 -a -n 1 ']'
++ break
++ cat /tmp/tmp.wdA2oRqtHR
++ cat /tmp/tmp.WqXqbUBvPO
++ rm /tmp/tmp.wdA2oRqtHR /tmp/tmp.WqXqbUBvPO
++ return 0
+ wait_pod percona-server-mongodb-operator-6b6bcb669-rnf5t
+ local pod=percona-server-mongodb-operator-6b6bcb669-rnf5t
+ set +o xtrace
waiting for pod/percona-server-mongodb-operator-6b6bcb669-rnf5t to be ready.OK
+ deploy_minio
+ desc 'install Minio'
+ set +o xtrace
-----------------------------------------------------------------------------------
install Minio
-----------------------------------------------------------------------------------
+ helm uninstall minio-service
Error: uninstall: Release not loaded: minio-service: release: not found
+ :
+ helm repo remove minio
"minio" has been removed from your repositories
+ helm repo add minio https://helm.min.io/
"minio" has been added to your repositories
+ retry 10 60 helm install minio-service --version 8.0.5 --set accessKey=some-access-key --set secretKey=some-secret-key --set service.type=ClusterIP --set configPathmc=/tmp/.minio/ --set persistence.size=2G --set environment.MINIO_REGION=us-east-1 --set environment.MINIO_HTTP_TRACE=/tmp/trace.log --set securityContext.enabled=false minio/minio
+ local max=10
+ local delay=60
+ shift 2
+ local n=1
+ helm install minio-service --version 8.0.5 --set accessKey=some-access-key --set secretKey=some-secret-key --set service.type=ClusterIP --set configPathmc=/tmp/.minio/ --set persistence.size=2G --set environment.MINIO_REGION=us-east-1 --set environment.MINIO_HTTP_TRACE=/tmp/trace.log --set securityContext.enabled=false minio/minio
NAME: minio-service
LAST DEPLOYED: Thu Oct 5 19:15:33 2023
NAMESPACE: demand-backup-physical-sharded-6860
STATUS: deployed
REVISION: 1
TEST SUITE: None
NOTES:
Minio can be accessed via port 9000 on the following DNS name from within your cluster:
minio-service.demand-backup-physical-sharded-6860.svc.cluster.local

To access Minio from localhost, run the below commands:

1. export POD_NAME=$(kubectl get pods --namespace demand-backup-physical-sharded-6860 -l "release=minio-service" -o jsonpath="{.items[0].metadata.name}")
2. kubectl port-forward $POD_NAME 9000 --namespace demand-backup-physical-sharded-6860

Read more about port forwarding here: http://kubernetes.io/docs/user-guide/kubectl/kubectl_port-forward/

You can now access Minio server on http://localhost:9000. Follow the below steps to connect to Minio server with mc client:

1. Download the Minio mc client - https://docs.minio.io/docs/minio-client-quickstart-guide
2. Get the ACCESS_KEY=$(kubectl get secret minio-service -o jsonpath="{.data.accesskey}" | base64 --decode) and the SECRET_KEY=$(kubectl get secret minio-service -o jsonpath="{.data.secretkey}" | base64 --decode)
3. mc alias set minio-service-local http://localhost:9000 "$ACCESS_KEY" "$SECRET_KEY" --api s3v4
4. mc ls minio-service-local

Alternately, you can use your browser or the Minio SDK to access the server - https://docs.minio.io/categories/17
++ kubectl_bin get pods --selector=release=minio-service -o 'jsonpath={.items[].metadata.name}'
+++ mktemp
++ local LAST_OUT=/tmp/tmp.2aXocywLeF
+++ mktemp
++ local LAST_ERR=/tmp/tmp.zBB8xMGZSS
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in '$(seq 0 2)'
++ set +e
++ kubectl get pods --selector=release=minio-service -o 'jsonpath={.items[].metadata.name}'
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 -a -n 1 ']'
++ break
++ cat /tmp/tmp.2aXocywLeF
++ cat /tmp/tmp.zBB8xMGZSS
++ rm /tmp/tmp.2aXocywLeF /tmp/tmp.zBB8xMGZSS
++ return 0
+ MINIO_POD=minio-service-5fc4dc898-h59cf
+ wait_pod minio-service-5fc4dc898-h59cf
+ local pod=minio-service-5fc4dc898-h59cf
+ set +o xtrace
waiting for pod/minio-service-5fc4dc898-h59cf to be ready....OK
+ '[' -n '' ']'
+ kubectl_bin run -i --rm aws-cli --image=perconalab/awscli --restart=Never -- bash -c 'AWS_ACCESS_KEY_ID=some-access-key AWS_SECRET_ACCESS_KEY=some-secret-key AWS_DEFAULT_REGION=us-east-1 \
 /usr/bin/aws --endpoint-url http://minio-service:9000 s3 mb s3://operator-testing'
++ mktemp
+ local LAST_OUT=/tmp/tmp.ZBJCi4J1ow
++ mktemp
+ local LAST_ERR=/tmp/tmp.68myLFdrk9
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl run -i --rm aws-cli --image=perconalab/awscli --restart=Never -- bash -c 'AWS_ACCESS_KEY_ID=some-access-key AWS_SECRET_ACCESS_KEY=some-secret-key AWS_DEFAULT_REGION=us-east-1 \
 /usr/bin/aws --endpoint-url http://minio-service:9000 s3 mb s3://operator-testing'
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.ZBJCi4J1ow
pod "aws-cli" deleted
+ cat /tmp/tmp.68myLFdrk9
If you don't see a command prompt, try pressing enter.
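
The `retry 10 60 helm install ...` call above expands its first two arguments into a max-attempts/delay pair (`local max=10`, `local delay=60`, `shift 2`, `local n=1` in the trace) before running the remaining words as a command. A sketch consistent with that trace; the failure branch is an assumption, since the first attempt succeeded here.

    # retry <max> <delay> <command...>: rerun a flaky command until it succeeds.
    retry() {
        local max=$1
        local delay=$2
        shift 2
        local n=1
        until "$@"; do
            if [ "$n" -ge "$max" ]; then
                echo "command '$*' still failing after $n attempts" >&2
                return 1
            fi
            n=$((n + 1))
            sleep "$delay"
        done
    }

Used exactly as in the log: `retry 10 60 helm install minio-service --version 8.0.5 ... minio/minio`, i.e. up to 10 attempts, 60 seconds apart.
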
+ rm /tmp/tmp.ZBJCi4J1ow /tmp/tmp.68myLFdrk9 + return 0 + apply_s3_storage_secrets + desc 'create secrets for cloud storages' + set +o xtrace ----------------------------------------------------------------------------------- create secrets for cloud storages ----------------------------------------------------------------------------------- + '[' -z '' ']' + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1352/e2e-tests/conf/minio-secret.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1352/e2e-tests/conf/cloud-secret.yml ++ mktemp + local LAST_OUT=/tmp/tmp.9sLONtWJkm ++ mktemp + local LAST_ERR=/tmp/tmp.Il7slBQ3oI + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1352/e2e-tests/conf/minio-secret.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1352/e2e-tests/conf/cloud-secret.yml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.9sLONtWJkm secret/minio-secret created secret/aws-s3-secret created secret/gcp-cs-secret created secret/azure-secret created + cat /tmp/tmp.Il7slBQ3oI + rm /tmp/tmp.9sLONtWJkm /tmp/tmp.Il7slBQ3oI + return 0 + desc 'Testing on sharded cluster' + set +o xtrace ----------------------------------------------------------------------------------- Testing on sharded cluster ----------------------------------------------------------------------------------- + echo 'Creating PSMDB cluster' Creating PSMDB cluster + cluster=some-name + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1352/e2e-tests/conf/secrets.yml ++ mktemp + local LAST_OUT=/tmp/tmp.fmcnxuunzZ ++ mktemp + local LAST_ERR=/tmp/tmp.vD39CFv4c9 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1352/e2e-tests/conf/secrets.yml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.fmcnxuunzZ secret/some-users created + cat /tmp/tmp.vD39CFv4c9 + rm /tmp/tmp.fmcnxuunzZ /tmp/tmp.vD39CFv4c9 + return 0 + apply_cluster /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1352/e2e-tests/demand-backup-physical-sharded/conf/some-name-sharded.yml + '[' -z '' ']' + cat_config /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1352/e2e-tests/demand-backup-physical-sharded/conf/some-name-sharded.yml + kubectl_bin apply -f - + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1352/e2e-tests/demand-backup-physical-sharded/conf/some-name-sharded.yml ++ mktemp + yq eval '(.spec | select(.image == null)).image = "perconalab/percona-server-mongodb-operator:main-mongod5.0"' + yq eval '.spec.upgradeOptions.apply="Never"' + yq eval '(.spec | select(has("backup"))).backup.image = "perconalab/percona-server-mongodb-operator:main-backup"' + local LAST_OUT=/tmp/tmp.UqdwIkVpAZ ++ mktemp + yq eval '(.spec | select(has("pmm"))).pmm.image = "perconalab/pmm-client:dev-latest"' + local LAST_ERR=/tmp/tmp.gswQXbeKf5 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.UqdwIkVpAZ perconaservermongodb.psmdb.percona.com/some-name created + cat /tmp/tmp.gswQXbeKf5 + rm /tmp/tmp.UqdwIkVpAZ /tmp/tmp.gswQXbeKf5 + return 0 + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1352/e2e-tests/conf/client_with_tls.yml ++ mktemp + local LAST_OUT=/tmp/tmp.MfvXiotWH2 ++ mktemp + local LAST_ERR=/tmp/tmp.13kcqWvDDG + local exit_status=0 + 
local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1352/e2e-tests/conf/client_with_tls.yml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.MfvXiotWH2 deployment.apps/psmdb-client created + cat /tmp/tmp.13kcqWvDDG + rm /tmp/tmp.MfvXiotWH2 /tmp/tmp.13kcqWvDDG + return 0 + echo 'check if all pods started' check if all pods started + wait_for_running some-name-rs0 3 + local name=some-name-rs0 + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=some-name ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + [[ 0 -eq 2 ]] + wait_pod some-name-rs0-0 + local pod=some-name-rs0-0 + set +o xtrace waiting for pod/some-name-rs0-0 to be ready............OK + for i in '$(seq 0 $last_pod)' + [[ 1 -eq 2 ]] + wait_pod some-name-rs0-1 + local pod=some-name-rs0-1 + set +o xtrace waiting for pod/some-name-rs0-1 to be ready......OK + for i in '$(seq 0 $last_pod)' + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ERIJGhoAkt +++ mktemp ++ local LAST_ERR=/tmp/tmp.qrg75jhrN9 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.ERIJGhoAkt ++ cat /tmp/tmp.qrg75jhrN9 ++ rm /tmp/tmp.ERIJGhoAkt /tmp/tmp.qrg75jhrN9 ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-rs0-2 + local pod=some-name-rs0-2 + set +o xtrace waiting for pod/some-name-rs0-2 to be ready......OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.CWpu42Jvlw +++ mktemp ++ local LAST_ERR=/tmp/tmp.zAq54TETiU ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.CWpu42Jvlw ++ cat /tmp/tmp.zAq54TETiU ++ rm /tmp/tmp.CWpu42Jvlw /tmp/tmp.zAq54TETiU ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness................ 
+ wait_for_running some-name-cfg 3 + local name=some-name-cfg + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=cfg + local cluster_name=some-name ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + [[ 0 -eq 2 ]] + wait_pod some-name-cfg-0 + local pod=some-name-cfg-0 + set +o xtrace waiting for pod/some-name-cfg-0 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 1 -eq 2 ]] + wait_pod some-name-cfg-1 + local pod=some-name-cfg-1 + set +o xtrace waiting for pod/some-name-cfg-1 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.fY02yp3lKm +++ mktemp ++ local LAST_ERR=/tmp/tmp.GcPiZ1vlv3 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.fY02yp3lKm ++ cat /tmp/tmp.GcPiZ1vlv3 ++ rm /tmp/tmp.fY02yp3lKm /tmp/tmp.GcPiZ1vlv3 ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-cfg-2 + local pod=some-name-cfg-2 + set +o xtrace waiting for pod/some-name-cfg-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].non_voting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.IdbCyczBoS +++ mktemp ++ local LAST_ERR=/tmp/tmp.DtV1jJ0AqD ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].non_voting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.IdbCyczBoS ++ cat /tmp/tmp.DtV1jJ0AqD ++ rm /tmp/tmp.IdbCyczBoS /tmp/tmp.DtV1jJ0AqD ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness + wait_for_running some-name-mongos 3 + local name=some-name-mongos + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=mongos + local cluster_name=some-name ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + [[ 0 -eq 2 ]] + wait_pod some-name-mongos-0 + local pod=some-name-mongos-0 + set +o xtrace waiting for pod/some-name-mongos-0 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 1 -eq 2 ]] + wait_pod some-name-mongos-1 + local pod=some-name-mongos-1 + set +o xtrace waiting for pod/some-name-mongos-1 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.1w4N77At1Q +++ mktemp ++ local LAST_ERR=/tmp/tmp.mt3NO08P4y ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.1w4N77At1Q ++ cat /tmp/tmp.mt3NO08P4y ++ rm /tmp/tmp.1w4N77At1Q /tmp/tmp.mt3NO08P4y ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-mongos-2 + local pod=some-name-mongos-2 + set +o xtrace waiting for pod/some-name-mongos-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].non_voting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.8G7vcriqvC +++ mktemp ++ local LAST_ERR=/tmp/tmp.Wt97tBlEmx ++ local exit_status=0 ++ local timeout=4 
+++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].non_voting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.8G7vcriqvC ++ cat /tmp/tmp.Wt97tBlEmx ++ rm /tmp/tmp.8G7vcriqvC /tmp/tmp.Wt97tBlEmx ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness + wait_cluster_consistency some-name + local cluster_name=some-name + local wait_time=32 + retry=0 + sleep 7 + echo -n 'waiting for cluster readyness' waiting for cluster readyness++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.etyjeUVGW4 +++ mktemp ++ local LAST_ERR=/tmp/tmp.YC2htNqPLp ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.etyjeUVGW4 ++ cat /tmp/tmp.YC2htNqPLp ++ rm /tmp/tmp.etyjeUVGW4 /tmp/tmp.YC2htNqPLp ++ return 0 + [[ ready == \r\e\a\d\y ]] ++ jq -r 'select(.loadBalancer != null and .loadBalancer.ingress != null and .loadBalancer.ingress != []) | .loadBalancer.ingress[0][]' ++ kubectl_bin get svc some-name-mongos '-o=jsonpath={.status}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.gMKwXmiWHc +++ mktemp ++ local LAST_ERR=/tmp/tmp.sFrn1nEamy ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get svc some-name-mongos '-o=jsonpath={.status}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.gMKwXmiWHc ++ cat /tmp/tmp.sFrn1nEamy ++ rm /tmp/tmp.gMKwXmiWHc /tmp/tmp.sFrn1nEamy ++ return 0 + lbEndpoint=34.29.171.73 + '[' -z 34.29.171.73 ']' + run_mongos 'db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})' userAdmin:userAdmin123456@some-name-mongos.demand-backup-physical-sharded-6860 + local 'command=db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})' + local uri=userAdmin:userAdmin123456@some-name-mongos.demand-backup-physical-sharded-6860 + local driver=mongodb + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.tHHLMxiylV +++ mktemp ++ local LAST_ERR=/tmp/tmp.XfD3RQ2ivU ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.tHHLMxiylV ++ cat /tmp/tmp.XfD3RQ2ivU ++ rm /tmp/tmp.tHHLMxiylV /tmp/tmp.XfD3RQ2ivU ++ return 0 + local client_container=psmdb-client-577b7bcc6c-m5k6c + local mongo_flag= + kubectl_bin exec psmdb-client-577b7bcc6c-m5k6c -- bash -c 'printf '\''db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})\n'\'' | mongo mongodb://userAdmin:userAdmin123456@some-name-mongos.demand-backup-physical-sharded-6860.svc.cluster.local/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.lMLj0954N2 ++ mktemp + local LAST_ERR=/tmp/tmp.CoCdNPj0Ew + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-577b7bcc6c-m5k6c -- bash -c 'printf '\''db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})\n'\'' | mongo 
mongodb://userAdmin:userAdmin123456@some-name-mongos.demand-backup-physical-sharded-6860.svc.cluster.local/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.lMLj0954N2 Percona Server for MongoDB shell version v4.4.24-23 connecting to: mongodb://some-name-mongos.demand-backup-physical-sharded-6860.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb Implicit session: session { "id" : UUID("90ab6e7e-10d7-4d7e-a6f7-4b765088e76b") } Percona Server for MongoDB server version: v5.0.20-17 WARNING: shell and server versions do not match Successfully added user: { "user" : "myApp", "roles" : [ { "db" : "myApp", "role" : "readWrite" } ] } bye + cat /tmp/tmp.CoCdNPj0Ew + rm /tmp/tmp.lMLj0954N2 /tmp/tmp.CoCdNPj0Ew + return 0 + sleep 1 + run_mongos 'use myApp\n db.test.insert({ x: 100501 })' myApp:myPass@some-name-mongos.demand-backup-physical-sharded-6860 + local 'command=use myApp\n db.test.insert({ x: 100501 })' + local uri=myApp:myPass@some-name-mongos.demand-backup-physical-sharded-6860 + local driver=mongodb + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.4Y5rqTGtFm +++ mktemp ++ local LAST_ERR=/tmp/tmp.T1UAAYXALS ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.4Y5rqTGtFm ++ cat /tmp/tmp.T1UAAYXALS ++ rm /tmp/tmp.4Y5rqTGtFm /tmp/tmp.T1UAAYXALS ++ return 0 + local client_container=psmdb-client-577b7bcc6c-m5k6c + local mongo_flag= + kubectl_bin exec psmdb-client-577b7bcc6c-m5k6c -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100501 })\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.demand-backup-physical-sharded-6860.svc.cluster.local/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.7jDc8k2xWh ++ mktemp + local LAST_ERR=/tmp/tmp.Xdh1I2N5Ak + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-577b7bcc6c-m5k6c -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100501 })\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.demand-backup-physical-sharded-6860.svc.cluster.local/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.7jDc8k2xWh Percona Server for MongoDB shell version v4.4.24-23 connecting to: mongodb://some-name-mongos.demand-backup-physical-sharded-6860.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb Implicit session: session { "id" : UUID("08a17aaa-6bb2-4c15-b59f-9ff8dafd9920") } Percona Server for MongoDB server version: v5.0.20-17 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.Xdh1I2N5Ak + rm /tmp/tmp.7jDc8k2xWh /tmp/tmp.Xdh1I2N5Ak + return 0 + sleep 5 + compare_mongos_cmd find myApp:myPass@some-name-mongos.demand-backup-physical-sharded-6860 -sharded + local command=find + local uri=myApp:myPass@some-name-mongos.demand-backup-physical-sharded-6860 + local postfix=-sharded + local suffix= + local database=myApp + local collection=test + egrep -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match' + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' + run_mongos 'use myApp\n 
db.test.find()' myApp:myPass@some-name-mongos.demand-backup-physical-sharded-6860 mongodb '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-mongos.demand-backup-physical-sharded-6860 + local driver=mongodb + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.932hG7Yutn +++ mktemp ++ local LAST_ERR=/tmp/tmp.caoR1TbpJU ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.932hG7Yutn ++ cat /tmp/tmp.caoR1TbpJU ++ rm /tmp/tmp.932hG7Yutn /tmp/tmp.caoR1TbpJU ++ return 0 + local client_container=psmdb-client-577b7bcc6c-m5k6c + local mongo_flag= + kubectl_bin exec psmdb-client-577b7bcc6c-m5k6c -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.demand-backup-physical-sharded-6860.svc.cluster.local/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.H2vIYc15Io ++ mktemp + local LAST_ERR=/tmp/tmp.BJAXPIflKK + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-577b7bcc6c-m5k6c -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.demand-backup-physical-sharded-6860.svc.cluster.local/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.H2vIYc15Io + cat /tmp/tmp.BJAXPIflKK + rm /tmp/tmp.H2vIYc15Io /tmp/tmp.BJAXPIflKK + return 0 + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1352/e2e-tests/demand-backup-physical-sharded/compare/find-sharded.json /tmp/tmp.0TFKOgWJzU/find-sharded + echo 'running backups' running backups + backup_name_minio=backup-minio-sharded + run_backup minio backup-minio-sharded + local storage=minio + local backup_name=backup-minio-sharded + kubectl_bin apply -f - ++ mktemp + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1352/e2e-tests/demand-backup-physical-sharded/conf/backup.yml + /usr/bin/sed -e 's/name:/name: backup-minio-sharded/' + /usr/bin/sed -e 's/storageName:/storageName: minio/' + local LAST_OUT=/tmp/tmp.Yl0dWPOwP5 ++ mktemp + local LAST_ERR=/tmp/tmp.xf3TF5L8lt + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Yl0dWPOwP5 perconaservermongodbbackup.psmdb.percona.com/backup-minio-sharded created + cat /tmp/tmp.xf3TF5L8lt + rm /tmp/tmp.Yl0dWPOwP5 /tmp/tmp.xf3TF5L8lt + return 0 + '[' -z '' ']' + backup_name_aws=backup-aws-s3-sharded + backup_name_gcp=backup-gcp-cs-sharded + backup_name_azure=backup-azure-blob-sharded + run_backup aws-s3 backup-aws-s3-sharded + local storage=aws-s3 + local backup_name=backup-aws-s3-sharded + /usr/bin/sed -e 's/name:/name: backup-aws-s3-sharded/' + /usr/bin/sed -e 's/storageName:/storageName: aws-s3/' + kubectl_bin apply -f - + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1352/e2e-tests/demand-backup-physical-sharded/conf/backup.yml ++ mktemp + local LAST_OUT=/tmp/tmp.8eRtZHUujp ++ mktemp + local LAST_ERR=/tmp/tmp.aZEpKLj3E5 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.8eRtZHUujp perconaservermongodbbackup.psmdb.percona.com/backup-aws-s3-sharded 
created
+ cat /tmp/tmp.aZEpKLj3E5
+ rm /tmp/tmp.8eRtZHUujp /tmp/tmp.aZEpKLj3E5
+ return 0
+ run_backup gcp-cs backup-gcp-cs-sharded
+ local storage=gcp-cs
+ local backup_name=backup-gcp-cs-sharded
+ cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1352/e2e-tests/demand-backup-physical-sharded/conf/backup.yml
+ /usr/bin/sed -e 's/name:/name: backup-gcp-cs-sharded/'
+ /usr/bin/sed -e 's/storageName:/storageName: gcp-cs/'
+ kubectl_bin apply -f -
++ mktemp
+ local LAST_OUT=/tmp/tmp.w8pE60vz6C
++ mktemp
+ local LAST_ERR=/tmp/tmp.rBXxexuUHP
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl apply -f -
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.w8pE60vz6C
perconaservermongodbbackup.psmdb.percona.com/backup-gcp-cs-sharded created
+ cat /tmp/tmp.rBXxexuUHP
+ rm /tmp/tmp.w8pE60vz6C /tmp/tmp.rBXxexuUHP
+ return 0
+ run_backup azure-blob backup-azure-blob-sharded
+ local storage=azure-blob
+ local backup_name=backup-azure-blob-sharded
+ cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1352/e2e-tests/demand-backup-physical-sharded/conf/backup.yml
+ /usr/bin/sed -e 's/name:/name: backup-azure-blob-sharded/'
+ /usr/bin/sed -e 's/storageName:/storageName: azure-blob/'
+ kubectl_bin apply -f -
++ mktemp
+ local LAST_OUT=/tmp/tmp.2yMsGjbfHP
++ mktemp
+ local LAST_ERR=/tmp/tmp.VSo99HEifk
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl apply -f -
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.2yMsGjbfHP
perconaservermongodbbackup.psmdb.percona.com/backup-azure-blob-sharded created
+ cat /tmp/tmp.VSo99HEifk
+ rm /tmp/tmp.2yMsGjbfHP /tmp/tmp.VSo99HEifk
+ return 0
+ wait_backup backup-aws-s3-sharded
+ local backup_name=backup-aws-s3-sharded
+ set +o xtrace
backup-aws-s3-sharded.........................
+ wait_backup backup-gcp-cs-sharded
+ local backup_name=backup-gcp-cs-sharded
+ set +o xtrace
backup-gcp-cs-sharded..............
+ wait_backup backup-azure-blob-sharded
+ local backup_name=backup-azure-blob-sharded
+ set +o xtrace
backup-azure-blob-sharded..............
+ wait_backup backup-minio-sharded
+ local backup_name=backup-minio-sharded
+ set +o xtrace
backup-minio-sharded.
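
Each of the four backups above is produced by the same two-step pattern: `run_backup` specializes the shared backup.yml template with sed and applies it, then `wait_backup` (xtrace off) polls until the PerconaServerMongoDBBackup reaches a terminal state, printing a dot per poll. `run_backup` is reconstructed directly from the trace; the `wait_backup` body and its use of the CR's `.status.state` field are assumptions.

    # run_backup <storage> <backup_name>: template and apply a backup CR,
    # exactly as traced for minio/aws-s3/gcp-cs/azure-blob. $test_dir stands
    # for e2e-tests/demand-backup-physical-sharded (assumed variable name).
    run_backup() {
        local storage=$1
        local backup_name=$2
        cat "$test_dir/conf/backup.yml" \
            | /usr/bin/sed -e "s/name:/name: $backup_name/" \
            | /usr/bin/sed -e "s/storageName:/storageName: $storage/" \
            | kubectl apply -f -
    }

    # Hypothetical wait_backup: poll the backup CR until it reports "ready".
    wait_backup() {
        local backup_name=$1
        echo -n "$backup_name"
        until [ "$(kubectl get perconaservermongodbbackup "$backup_name" -o 'jsonpath={.status.state}' 2>/dev/null)" = "ready" ]; do
            echo -n .
            sleep 1
        done
    }
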
+ '[' -z '' ']' + echo 'drop collection' drop collection + run_mongos 'use myApp\n db.test.drop()' myApp:myPass@some-name-mongos.demand-backup-physical-sharded-6860 + local 'command=use myApp\n db.test.drop()' + local uri=myApp:myPass@some-name-mongos.demand-backup-physical-sharded-6860 + local driver=mongodb + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Gq272ZJkrL +++ mktemp ++ local LAST_ERR=/tmp/tmp.GB7oXW1IXC ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Gq272ZJkrL ++ cat /tmp/tmp.GB7oXW1IXC ++ rm /tmp/tmp.Gq272ZJkrL /tmp/tmp.GB7oXW1IXC ++ return 0 + local client_container=psmdb-client-577b7bcc6c-m5k6c + local mongo_flag= + kubectl_bin exec psmdb-client-577b7bcc6c-m5k6c -- bash -c 'printf '\''use myApp\n db.test.drop()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.demand-backup-physical-sharded-6860.svc.cluster.local/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.aNpVgnfa3D ++ mktemp + local LAST_ERR=/tmp/tmp.E6NEhmDDfK + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-577b7bcc6c-m5k6c -- bash -c 'printf '\''use myApp\n db.test.drop()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.demand-backup-physical-sharded-6860.svc.cluster.local/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.aNpVgnfa3D Percona Server for MongoDB shell version v4.4.24-23 connecting to: mongodb://some-name-mongos.demand-backup-physical-sharded-6860.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb Implicit session: session { "id" : UUID("5b8ce52a-2693-4187-8858-1be11a53ebc0") } Percona Server for MongoDB server version: v5.0.20-17 WARNING: shell and server versions do not match switched to db myApp true bye + cat /tmp/tmp.E6NEhmDDfK + rm /tmp/tmp.aNpVgnfa3D /tmp/tmp.E6NEhmDDfK + return 0 + echo 'check backup and restore -- aws-s3' check backup and restore -- aws-s3 + run_restore backup-aws-s3-sharded _restore_sharded + local backup_name=backup-aws-s3-sharded + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1352/e2e-tests/demand-backup-physical-sharded/conf/restore.yml + /usr/bin/sed -e 's/name:/name: restore-backup-aws-s3-sharded/' + /usr/bin/sed -e 's/backupName:/backupName: backup-aws-s3-sharded/' + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.XTfbTCStPz ++ mktemp + local LAST_ERR=/tmp/tmp.2TKFoEuSpv + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.XTfbTCStPz perconaservermongodbrestore.psmdb.percona.com/restore-backup-aws-s3-sharded created + cat /tmp/tmp.2TKFoEuSpv + rm /tmp/tmp.XTfbTCStPz /tmp/tmp.2TKFoEuSpv + return 0 + run_recovery_check backup-aws-s3-sharded _restore_sharded + local backup_name=backup-aws-s3-sharded + local compare_suffix=_restore_sharded + wait_restore backup-aws-s3-sharded some-name requested 0 420 + local backup_name=backup-aws-s3-sharded + local cluster_name=some-name + local target_state=requested + local wait_cluster_consistency=0 + local wait_time=420 + set +o xtrace waiting psmdb-restore/backup-aws-s3-sharded to reach requested 
state................................................................................................................................................................................................................................... + '[' 0 -eq 1 ']' + echo + compare_kubectl statefulset/some-name-rs0 _restore_sharded + local resource=statefulset/some-name-rs0 + local postfix=_restore_sharded + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1352/e2e-tests/demand-backup-physical-sharded/compare/statefulset_some-name-rs0_restore_sharded.yml + local new_result=/tmp/tmp.0TFKOgWJzU/statefulset_some-name-rs0.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1352/e2e-tests/demand-backup-physical-sharded/compare/statefulset_some-name-rs0_restore_sharded-oc.yml ']' + kubectl_bin get -o yaml statefulset/some-name-rs0 ++ mktemp + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("demand-backup-physical-sharded-6860", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. 
== "batch/v1beta1")) = "batch/v1" ' - + local LAST_OUT=/tmp/tmp.adFr0sHdTD ++ mktemp + local LAST_ERR=/tmp/tmp.xH2mUheEmD + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get -o yaml statefulset/some-name-rs0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.adFr0sHdTD + cat /tmp/tmp.xH2mUheEmD + rm /tmp/tmp.adFr0sHdTD /tmp/tmp.xH2mUheEmD + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.0TFKOgWJzU/statefulset_some-name-rs0.yml + version_gt 1.22 ++ echo '1.24 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.0TFKOgWJzU/statefulset_some-name-rs0.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.0TFKOgWJzU/statefulset_some-name-rs0.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1352/e2e-tests/demand-backup-physical-sharded/compare/statefulset_some-name-rs0_restore_sharded.yml == */cronjob* ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1352/e2e-tests/demand-backup-physical-sharded/compare/statefulset_some-name-rs0_restore_sharded.yml /tmp/tmp.0TFKOgWJzU/statefulset_some-name-rs0.yml + wait_restore backup-aws-s3-sharded some-name ready 0 900 + local backup_name=backup-aws-s3-sharded + local cluster_name=some-name + local target_state=ready + local wait_cluster_consistency=0 + local wait_time=900 + set +o xtrace waiting psmdb-restore/backup-aws-s3-sharded to reach ready state........................................................................ + '[' 0 -eq 1 ']' + kubectl_bin get psmdb some-name -o yaml ++ mktemp + local LAST_OUT=/tmp/tmp.8iuGE64WiA ++ mktemp + local LAST_ERR=/tmp/tmp.kfzzxOWj1K + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get psmdb some-name -o yaml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.8iuGE64WiA apiVersion: psmdb.percona.com/v1 kind: PerconaServerMongoDB metadata: annotations: kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"psmdb.percona.com/v1","kind":"PerconaServerMongoDB","metadata":{"annotations":{},"name":"some-name","namespace":"demand-backup-physical-sharded-6860"},"spec":{"backup":{"enabled":true,"image":"perconalab/percona-server-mongodb-operator:main-backup","storages":{"aws-s3":{"s3":{"bucket":"operator-testing","credentialsSecret":"aws-s3-secret","insecureSkipTLSVerify":false,"prefix":"psmdb-demand-backup-physical-sharded","region":"us-east-1"},"type":"s3"},"azure-blob":{"azure":{"container":"operator-testing","credentialsSecret":"azure-secret","prefix":"psmdb-demand-backup-physical-sharded"},"type":"azure"},"gcp-cs":{"s3":{"bucket":"operator-testing","credentialsSecret":"gcp-cs-secret","endpointUrl":"https://storage.googleapis.com","insecureSkipTLSVerify":false,"prefix":"psmdb-demand-backup-physical-sharded","region":"us-east-1"},"type":"s3"},"minio":{"s3":{"bucket":"operator-testing","credentialsSecret":"minio-secret","endpointUrl":"http://minio-service:9000/","insecureSkipTLSVerify":false,"region":"us-east-1"},"type":"s3"}},"tasks":[{"compressionType":"gzip","enabled":true,"name":"weekly","schedule":"0 0 * * 0","storageName":"aws-s3"}]},"image":"perconalab/percona-server-mongodb-operator:main-mongod5.0","imagePullPolicy":"Always","replsets":[{"affinity":{"antiAffinityTopologyKey":"none"},"configuration":"operationProfiling:\n mode: slowOp\n slowOpThresholdMs: 100\nsecurity:\n enableEncryption: true\n redactClientLogData: false\nsetParameter:\n 
ttlMonitorSleepSecs: 60\n wiredTigerConcurrentReadTransactions: 128\n wiredTigerConcurrentWriteTransactions: 128\nstorage:\n engine: wiredTiger\n wiredTiger:\n collectionConfig:\n blockCompressor: snappy\n engineConfig:\n directoryForIndexes: false\n journalCompressor: snappy\n indexConfig:\n prefixCompression: true\n","expose":{"enabled":false,"exposeType":"ClusterIP"},"name":"rs0","resources":{"limits":{"cpu":"500m","memory":"1G"},"requests":{"cpu":"100m","memory":"0.1G"}},"size":3,"volumeSpec":{"persistentVolumeClaim":{"resources":{"requests":{"storage":"3Gi"}}}}}],"secrets":{"users":"some-users"},"sharding":{"configsvrReplSet":{"affinity":{"antiAffinityTopologyKey":"none"},"configuration":"operationProfiling:\n mode: slowOp\n slowOpThresholdMs: 100\nsecurity:\n enableEncryption: true\n redactClientLogData: false\nsetParameter:\n ttlMonitorSleepSecs: 60\n wiredTigerConcurrentReadTransactions: 128\n wiredTigerConcurrentWriteTransactions: 128\nstorage:\n engine: wiredTiger\n wiredTiger:\n collectionConfig:\n blockCompressor: snappy\n engineConfig:\n directoryForIndexes: false\n journalCompressor: snappy\n indexConfig:\n prefixCompression: true\n","expose":{"enabled":false,"exposeType":"ClusterIP"},"resources":{"limits":{"cpu":"500m","memory":"1G"},"requests":{"cpu":"100m","memory":"0.1G"}},"size":3,"volumeSpec":{"persistentVolumeClaim":{"resources":{"requests":{"storage":"3Gi"}}}}},"enabled":true,"mongos":{"affinity":{"antiAffinityTopologyKey":"none"},"expose":{"exposeType":"LoadBalancer"},"resources":{"limits":{"cpu":"500m","memory":"1G"},"requests":{"cpu":"100m","memory":"0.1G"}},"size":3}},"upgradeOptions":{"apply":"Never"}}} percona.com/resync-pbm: "true" creationTimestamp: "2023-10-05T19:16:13Z" generation: 2 name: some-name namespace: demand-backup-physical-sharded-6860 resourceVersion: "18455" uid: 42c790e2-c70a-4f39-8fbb-1ddb01cc66f6 spec: backup: enabled: true image: perconalab/percona-server-mongodb-operator:main-backup storages: aws-s3: s3: bucket: operator-testing credentialsSecret: aws-s3-secret insecureSkipTLSVerify: false prefix: psmdb-demand-backup-physical-sharded region: us-east-1 type: s3 azure-blob: azure: container: operator-testing credentialsSecret: azure-secret prefix: psmdb-demand-backup-physical-sharded type: azure gcp-cs: s3: bucket: operator-testing credentialsSecret: gcp-cs-secret endpointUrl: https://storage.googleapis.com insecureSkipTLSVerify: false prefix: psmdb-demand-backup-physical-sharded region: us-east-1 type: s3 minio: s3: bucket: operator-testing credentialsSecret: minio-secret endpointUrl: http://minio-service:9000/ insecureSkipTLSVerify: false region: us-east-1 type: s3 tasks: - compressionType: gzip enabled: true name: weekly schedule: 0 0 * * 0 storageName: aws-s3 crVersion: 1.15.0 image: perconalab/percona-server-mongodb-operator:main-mongod5.0 imagePullPolicy: Always replsets: - affinity: antiAffinityTopologyKey: none configuration: | operationProfiling: mode: slowOp slowOpThresholdMs: 100 security: enableEncryption: true redactClientLogData: false setParameter: ttlMonitorSleepSecs: 60 wiredTigerConcurrentReadTransactions: 128 wiredTigerConcurrentWriteTransactions: 128 storage: engine: wiredTiger wiredTiger: collectionConfig: blockCompressor: snappy engineConfig: directoryForIndexes: false journalCompressor: snappy indexConfig: prefixCompression: true expose: enabled: false exposeType: ClusterIP name: rs0 resources: limits: cpu: 500m memory: 1G requests: cpu: 100m memory: 0.1G size: 3 volumeSpec: persistentVolumeClaim: resources: requests: 
storage: 3Gi secrets: users: some-users sharding: configsvrReplSet: affinity: antiAffinityTopologyKey: none configuration: | operationProfiling: mode: slowOp slowOpThresholdMs: 100 security: enableEncryption: true redactClientLogData: false setParameter: ttlMonitorSleepSecs: 60 wiredTigerConcurrentReadTransactions: 128 wiredTigerConcurrentWriteTransactions: 128 storage: engine: wiredTiger wiredTiger: collectionConfig: blockCompressor: snappy engineConfig: directoryForIndexes: false journalCompressor: snappy indexConfig: prefixCompression: true expose: enabled: false exposeType: ClusterIP resources: limits: cpu: 500m memory: 1G requests: cpu: 100m memory: 0.1G size: 3 volumeSpec: persistentVolumeClaim: resources: requests: storage: 3Gi enabled: true mongos: affinity: antiAffinityTopologyKey: none expose: exposeType: LoadBalancer resources: limits: cpu: 500m memory: 1G requests: cpu: 100m memory: 0.1G size: 3 upgradeOptions: apply: Never status: conditions: - lastTransitionTime: "2023-10-05T19:16:15Z" status: "True" type: initializing - lastTransitionTime: "2023-10-05T19:18:05Z" message: 'rs0: ready' reason: RSReady status: "True" type: ready - lastTransitionTime: "2023-10-05T19:18:05Z" status: "True" type: initializing - lastTransitionTime: "2023-10-05T19:18:46Z" reason: MongosReady status: "True" type: ready - lastTransitionTime: "2023-10-05T19:18:46Z" status: "True" type: initializing - lastTransitionTime: "2023-10-05T19:18:55Z" status: "True" type: ready - lastTransitionTime: "2023-10-05T19:23:22Z" status: "True" type: initializing - lastTransitionTime: "2023-10-05T19:24:05Z" message: 'rs0: ready' reason: RSReady status: "True" type: ready - lastTransitionTime: "2023-10-05T19:24:05Z" status: "True" type: initializing - lastTransitionTime: "2023-10-05T19:24:41Z" message: 'rs0: ready' reason: RSReady status: "True" type: ready - lastTransitionTime: "2023-10-05T19:24:41Z" status: "True" type: initializing - lastTransitionTime: "2023-10-05T19:25:17Z" message: 'rs0: ready' reason: RSReady status: "True" type: ready - lastTransitionTime: "2023-10-05T19:25:17Z" status: "True" type: initializing host: 34.29.171.73 mongoImage: perconalab/percona-server-mongodb-operator:main-mongod5.0 mongoVersion: 5.0.20-17 mongos: ready: 0 size: 0 status: initializing observedGeneration: 2 ready: 0 replsets: cfg: initialized: true ready: 0 size: 3 status: initializing rs0: added_as_shard: true initialized: true ready: 0 size: 3 status: initializing size: 6 state: initializing + cat /tmp/tmp.kfzzxOWj1K + rm /tmp/tmp.8iuGE64WiA /tmp/tmp.kfzzxOWj1K + return 0 ++ kubectl_bin get psmdb some-name -o yaml ++ yq '.metadata.annotations."percona.com/resync-pbm"' +++ mktemp ++ local LAST_OUT=/tmp/tmp.VEh9VocC08 +++ mktemp ++ local LAST_ERR=/tmp/tmp.D5Y0ap60fU ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o yaml ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.VEh9VocC08 ++ cat /tmp/tmp.D5Y0ap60fU ++ rm /tmp/tmp.VEh9VocC08 /tmp/tmp.D5Y0ap60fU ++ return 0 + '[' true == null ']' + echo + wait_cluster_consistency some-name + local cluster_name=some-name + local wait_time=32 + retry=0 + sleep 7 + echo -n 'waiting for cluster readyness' waiting for cluster readyness++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.YCOBcmmg1t +++ mktemp ++ local LAST_ERR=/tmp/tmp.HkG8bd1wUv ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl 
get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.YCOBcmmg1t ++ cat /tmp/tmp.HkG8bd1wUv ++ rm /tmp/tmp.YCOBcmmg1t /tmp/tmp.HkG8bd1wUv ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 1 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.cRjxjGfTig +++ mktemp ++ local LAST_ERR=/tmp/tmp.lo91fTgWKJ ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.cRjxjGfTig ++ cat /tmp/tmp.lo91fTgWKJ ++ rm /tmp/tmp.cRjxjGfTig /tmp/tmp.lo91fTgWKJ ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 2 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.NHcbgBH7ST +++ mktemp ++ local LAST_ERR=/tmp/tmp.V6MK1xRrZY ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.NHcbgBH7ST ++ cat /tmp/tmp.V6MK1xRrZY ++ rm /tmp/tmp.NHcbgBH7ST /tmp/tmp.V6MK1xRrZY ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 3 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.l195tK2nWE +++ mktemp ++ local LAST_ERR=/tmp/tmp.l48sNx9nPY ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.l195tK2nWE ++ cat /tmp/tmp.l48sNx9nPY ++ rm /tmp/tmp.l195tK2nWE /tmp/tmp.l48sNx9nPY ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 4 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.pOhyDsrcWA +++ mktemp ++ local LAST_ERR=/tmp/tmp.btlbD1S5uf ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.pOhyDsrcWA ++ cat /tmp/tmp.btlbD1S5uf ++ rm /tmp/tmp.pOhyDsrcWA /tmp/tmp.btlbD1S5uf ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 5 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.izO7M0XLCM +++ mktemp ++ local LAST_ERR=/tmp/tmp.NIpVY8BiDo ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.izO7M0XLCM ++ cat /tmp/tmp.NIpVY8BiDo ++ rm /tmp/tmp.izO7M0XLCM /tmp/tmp.NIpVY8BiDo ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 6 -ge 32 ']' + echo -n . 
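The repeated mktemp/LAST_OUT/LAST_ERR bookkeeping above all comes from the kubectl_bin wrapper that this suite routes every kubectl call through. A minimal sketch of its shape, reconstructed from the trace rather than copied from e2e-tests/functions, so the back-off delay and error routing are assumptions:

kubectl_bin() {
    local LAST_OUT LAST_ERR exit_status=0 timeout=4
    LAST_OUT=$(mktemp)
    LAST_ERR=$(mktemp)
    for i in $(seq 0 2); do                     # up to three attempts
        set +e
        kubectl "$@" >"$LAST_OUT" 2>"$LAST_ERR"
        exit_status=$?
        set -e
        if [ "$exit_status" -eq 0 ]; then
            break
        fi
        sleep $((timeout * i))                  # assumed back-off; consistent with the 'sleep 0' seen on first retries
    done
    cat "$LAST_OUT"
    cat "$LAST_ERR" >&2                         # the trace cats both capture files back out
    rm -f "$LAST_OUT" "$LAST_ERR"
    return "$exit_status"
}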
.+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.dnDh7UqnTE +++ mktemp ++ local LAST_ERR=/tmp/tmp.h0p1MRn2DY ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.dnDh7UqnTE ++ cat /tmp/tmp.h0p1MRn2DY ++ rm /tmp/tmp.dnDh7UqnTE /tmp/tmp.h0p1MRn2DY ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 7 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.YjyEGa02Gc +++ mktemp ++ local LAST_ERR=/tmp/tmp.khBDKkVhTO ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.YjyEGa02Gc ++ cat /tmp/tmp.khBDKkVhTO ++ rm /tmp/tmp.YjyEGa02Gc /tmp/tmp.khBDKkVhTO ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 8 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.SsGwnhpTof +++ mktemp ++ local LAST_ERR=/tmp/tmp.UaN2RYVCM1 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.SsGwnhpTof ++ cat /tmp/tmp.UaN2RYVCM1 ++ rm /tmp/tmp.SsGwnhpTof /tmp/tmp.UaN2RYVCM1 ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 9 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.OSSrpoBi4Z +++ mktemp ++ local LAST_ERR=/tmp/tmp.aCsWmYFyDS ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.OSSrpoBi4Z ++ cat /tmp/tmp.aCsWmYFyDS ++ rm /tmp/tmp.OSSrpoBi4Z /tmp/tmp.aCsWmYFyDS ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 10 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.TRyskOVsIl +++ mktemp ++ local LAST_ERR=/tmp/tmp.R7wje7JZKu ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.TRyskOVsIl ++ cat /tmp/tmp.R7wje7JZKu ++ rm /tmp/tmp.TRyskOVsIl /tmp/tmp.R7wje7JZKu ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 11 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.4n3GkpJ3Z1 +++ mktemp ++ local LAST_ERR=/tmp/tmp.GXrNEFSbZV ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.4n3GkpJ3Z1 ++ cat /tmp/tmp.GXrNEFSbZV ++ rm /tmp/tmp.4n3GkpJ3Z1 /tmp/tmp.GXrNEFSbZV ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 12 -ge 32 ']' + echo -n . 
.+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.EcDnyJVDC5 +++ mktemp ++ local LAST_ERR=/tmp/tmp.YiL4865eHk ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.EcDnyJVDC5 ++ cat /tmp/tmp.YiL4865eHk ++ rm /tmp/tmp.EcDnyJVDC5 /tmp/tmp.YiL4865eHk ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 13 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.mD211WwUgx +++ mktemp ++ local LAST_ERR=/tmp/tmp.budPEMG8Yu ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.mD211WwUgx ++ cat /tmp/tmp.budPEMG8Yu ++ rm /tmp/tmp.mD211WwUgx /tmp/tmp.budPEMG8Yu ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 14 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.CjCeVHMUXw +++ mktemp ++ local LAST_ERR=/tmp/tmp.z0raAXv8l5 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.CjCeVHMUXw ++ cat /tmp/tmp.z0raAXv8l5 ++ rm /tmp/tmp.CjCeVHMUXw /tmp/tmp.z0raAXv8l5 ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 15 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.AN6zHi05w8 +++ mktemp ++ local LAST_ERR=/tmp/tmp.TjH63p0xJ4 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.AN6zHi05w8 ++ cat /tmp/tmp.TjH63p0xJ4 ++ rm /tmp/tmp.AN6zHi05w8 /tmp/tmp.TjH63p0xJ4 ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 16 -ge 32 ']' + echo -n . 
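The dots above come from wait_cluster_consistency polling .status.state once every 10 seconds and giving up after 32 attempts (hence the '-ge 32' checks). A minimal sketch of that loop, using the values visible in this trace; the real helper in e2e-tests/functions may differ in details:

wait_cluster_consistency() {
    local cluster_name=$1
    local wait_time=32                          # maximum polls, per the '-ge 32' checks
    local retry=0
    sleep 7
    echo -n 'waiting for cluster readyness'     # spelling as emitted by the suite
    until [[ $(kubectl get psmdb "$cluster_name" -o 'jsonpath={.status.state}') == "ready" ]]; do
        retry=$((retry + 1))
        if [ "$retry" -ge "$wait_time" ]; then
            echo "cluster $cluster_name did not become ready"
            exit 1
        fi
        echo -n .
        sleep 10
    done
}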
.+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.RuGt2d2YH7 +++ mktemp ++ local LAST_ERR=/tmp/tmp.sMvPaApKTj ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.RuGt2d2YH7 ++ cat /tmp/tmp.sMvPaApKTj ++ rm /tmp/tmp.RuGt2d2YH7 /tmp/tmp.sMvPaApKTj ++ return 0 + [[ ready == \r\e\a\d\y ]] + compare_mongos_cmd find myApp:myPass@some-name-mongos.demand-backup-physical-sharded-6860 -sharded + local command=find + local uri=myApp:myPass@some-name-mongos.demand-backup-physical-sharded-6860 + local postfix=-sharded + local suffix= + local database=myApp + local collection=test + run_mongos 'use myApp\n db.test.find()' myApp:myPass@some-name-mongos.demand-backup-physical-sharded-6860 mongodb '' + egrep -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-mongos.demand-backup-physical-sharded-6860 + local driver=mongodb + local suffix=.svc.cluster.local + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.mwaeReNcV4 +++ mktemp ++ local LAST_ERR=/tmp/tmp.CXR1i8rNDn ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.mwaeReNcV4 ++ cat /tmp/tmp.CXR1i8rNDn ++ rm /tmp/tmp.mwaeReNcV4 /tmp/tmp.CXR1i8rNDn ++ return 0 + local client_container=psmdb-client-577b7bcc6c-m5k6c + local mongo_flag= + kubectl_bin exec psmdb-client-577b7bcc6c-m5k6c -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.demand-backup-physical-sharded-6860.svc.cluster.local/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.3s5lJUOs6q ++ mktemp + local LAST_ERR=/tmp/tmp.6AplmrWVYJ + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-577b7bcc6c-m5k6c -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.demand-backup-physical-sharded-6860.svc.cluster.local/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.3s5lJUOs6q + cat /tmp/tmp.6AplmrWVYJ + rm /tmp/tmp.3s5lJUOs6q /tmp/tmp.6AplmrWVYJ + return 0 + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1352/e2e-tests/demand-backup-physical-sharded/compare/find-sharded.json /tmp/tmp.0TFKOgWJzU/find-sharded + echo + set -o xtrace + check_exported_mongos_service_endpoint 34.29.171.73 + local host=34.29.171.73 ++ kubectl_bin get psmdb some-name '-o=jsonpath={.status.host}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.es0uqixthN +++ mktemp ++ local LAST_ERR=/tmp/tmp.okhTmlJkl1 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name '-o=jsonpath={.status.host}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.es0uqixthN ++ cat /tmp/tmp.okhTmlJkl1 ++ rm /tmp/tmp.es0uqixthN /tmp/tmp.okhTmlJkl1 ++ return 0 + '[' 34.29.171.73 '!=' 34.29.171.73 ']' + echo 
'drop collection' drop collection + run_mongos 'use myApp\n db.test.drop()' myApp:myPass@some-name-mongos.demand-backup-physical-sharded-6860 + local 'command=use myApp\n db.test.drop()' + local uri=myApp:myPass@some-name-mongos.demand-backup-physical-sharded-6860 + local driver=mongodb + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.z64rsGcCh9 +++ mktemp ++ local LAST_ERR=/tmp/tmp.nmO6tJgp3N ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.z64rsGcCh9 ++ cat /tmp/tmp.nmO6tJgp3N ++ rm /tmp/tmp.z64rsGcCh9 /tmp/tmp.nmO6tJgp3N ++ return 0 + local client_container=psmdb-client-577b7bcc6c-m5k6c + local mongo_flag= + kubectl_bin exec psmdb-client-577b7bcc6c-m5k6c -- bash -c 'printf '\''use myApp\n db.test.drop()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.demand-backup-physical-sharded-6860.svc.cluster.local/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.k5bAHIixma ++ mktemp + local LAST_ERR=/tmp/tmp.fyBY8SEJOD + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-577b7bcc6c-m5k6c -- bash -c 'printf '\''use myApp\n db.test.drop()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.demand-backup-physical-sharded-6860.svc.cluster.local/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.k5bAHIixma Percona Server for MongoDB shell version v4.4.24-23 connecting to: mongodb://some-name-mongos.demand-backup-physical-sharded-6860.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb Implicit session: session { "id" : UUID("d840d8dd-cafb-4ac7-88f5-6feb8d099468") } Percona Server for MongoDB server version: v5.0.20-17 WARNING: shell and server versions do not match switched to db myApp true bye + cat /tmp/tmp.fyBY8SEJOD + rm /tmp/tmp.k5bAHIixma /tmp/tmp.fyBY8SEJOD + return 0 + echo 'check backup and restore -- gcp-cs' check backup and restore -- gcp-cs + run_restore backup-gcp-cs-sharded _restore_sharded + local backup_name=backup-gcp-cs-sharded + /usr/bin/sed -e 's/name:/name: restore-backup-gcp-cs-sharded/' + /usr/bin/sed -e 's/backupName:/backupName: backup-gcp-cs-sharded/' + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1352/e2e-tests/demand-backup-physical-sharded/conf/restore.yml + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.PMZSgxKrkr ++ mktemp + local LAST_ERR=/tmp/tmp.gPSGcGPBfy + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.PMZSgxKrkr perconaservermongodbrestore.psmdb.percona.com/restore-backup-gcp-cs-sharded created + cat /tmp/tmp.gPSGcGPBfy + rm /tmp/tmp.PMZSgxKrkr /tmp/tmp.gPSGcGPBfy + return 0 + run_recovery_check backup-gcp-cs-sharded _restore_sharded + local backup_name=backup-gcp-cs-sharded + local compare_suffix=_restore_sharded + wait_restore backup-gcp-cs-sharded some-name requested 0 420 + local backup_name=backup-gcp-cs-sharded + local cluster_name=some-name + local target_state=requested + local wait_cluster_consistency=0 + local wait_time=420 + set +o xtrace waiting psmdb-restore/backup-gcp-cs-sharded to reach requested 
state.................................................................................................................................................................................................................................................................................. + '[' 0 -eq 1 ']' + echo + compare_kubectl statefulset/some-name-rs0 _restore_sharded + local resource=statefulset/some-name-rs0 + local postfix=_restore_sharded + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1352/e2e-tests/demand-backup-physical-sharded/compare/statefulset_some-name-rs0_restore_sharded.yml + local new_result=/tmp/tmp.0TFKOgWJzU/statefulset_some-name-rs0.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1352/e2e-tests/demand-backup-physical-sharded/compare/statefulset_some-name-rs0_restore_sharded-oc.yml ']' + kubectl_bin get -o yaml statefulset/some-name-rs0 + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("demand-backup-physical-sharded-6860", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. 
== "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.jRGuH3TMUD ++ mktemp + local LAST_ERR=/tmp/tmp.ob28TYHHNr + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get -o yaml statefulset/some-name-rs0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.jRGuH3TMUD + cat /tmp/tmp.ob28TYHHNr + rm /tmp/tmp.jRGuH3TMUD /tmp/tmp.ob28TYHHNr + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.0TFKOgWJzU/statefulset_some-name-rs0.yml + version_gt 1.22 ++ echo '1.24 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.0TFKOgWJzU/statefulset_some-name-rs0.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.0TFKOgWJzU/statefulset_some-name-rs0.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1352/e2e-tests/demand-backup-physical-sharded/compare/statefulset_some-name-rs0_restore_sharded.yml == */cronjob* ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1352/e2e-tests/demand-backup-physical-sharded/compare/statefulset_some-name-rs0_restore_sharded.yml /tmp/tmp.0TFKOgWJzU/statefulset_some-name-rs0.yml + wait_restore backup-gcp-cs-sharded some-name ready 0 900 + local backup_name=backup-gcp-cs-sharded + local cluster_name=some-name + local target_state=ready + local wait_cluster_consistency=0 + local wait_time=900 + set +o xtrace waiting psmdb-restore/backup-gcp-cs-sharded to reach ready state.................................................................................. + '[' 0 -eq 1 ']' + kubectl_bin get psmdb some-name -o yaml ++ mktemp + local LAST_OUT=/tmp/tmp.jYS1ftxaMB ++ mktemp + local LAST_ERR=/tmp/tmp.UtikTctLJx + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get psmdb some-name -o yaml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.jYS1ftxaMB apiVersion: psmdb.percona.com/v1 kind: PerconaServerMongoDB metadata: annotations: kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"psmdb.percona.com/v1","kind":"PerconaServerMongoDB","metadata":{"annotations":{},"name":"some-name","namespace":"demand-backup-physical-sharded-6860"},"spec":{"backup":{"enabled":true,"image":"perconalab/percona-server-mongodb-operator:main-backup","storages":{"aws-s3":{"s3":{"bucket":"operator-testing","credentialsSecret":"aws-s3-secret","insecureSkipTLSVerify":false,"prefix":"psmdb-demand-backup-physical-sharded","region":"us-east-1"},"type":"s3"},"azure-blob":{"azure":{"container":"operator-testing","credentialsSecret":"azure-secret","prefix":"psmdb-demand-backup-physical-sharded"},"type":"azure"},"gcp-cs":{"s3":{"bucket":"operator-testing","credentialsSecret":"gcp-cs-secret","endpointUrl":"https://storage.googleapis.com","insecureSkipTLSVerify":false,"prefix":"psmdb-demand-backup-physical-sharded","region":"us-east-1"},"type":"s3"},"minio":{"s3":{"bucket":"operator-testing","credentialsSecret":"minio-secret","endpointUrl":"http://minio-service:9000/","insecureSkipTLSVerify":false,"region":"us-east-1"},"type":"s3"}},"tasks":[{"compressionType":"gzip","enabled":true,"name":"weekly","schedule":"0 0 * * 0","storageName":"aws-s3"}]},"image":"perconalab/percona-server-mongodb-operator:main-mongod5.0","imagePullPolicy":"Always","replsets":[{"affinity":{"antiAffinityTopologyKey":"none"},"configuration":"operationProfiling:\n mode: slowOp\n slowOpThresholdMs: 100\nsecurity:\n enableEncryption: true\n redactClientLogData: 
false\nsetParameter:\n ttlMonitorSleepSecs: 60\n wiredTigerConcurrentReadTransactions: 128\n wiredTigerConcurrentWriteTransactions: 128\nstorage:\n engine: wiredTiger\n wiredTiger:\n collectionConfig:\n blockCompressor: snappy\n engineConfig:\n directoryForIndexes: false\n journalCompressor: snappy\n indexConfig:\n prefixCompression: true\n","expose":{"enabled":false,"exposeType":"ClusterIP"},"name":"rs0","resources":{"limits":{"cpu":"500m","memory":"1G"},"requests":{"cpu":"100m","memory":"0.1G"}},"size":3,"volumeSpec":{"persistentVolumeClaim":{"resources":{"requests":{"storage":"3Gi"}}}}}],"secrets":{"users":"some-users"},"sharding":{"configsvrReplSet":{"affinity":{"antiAffinityTopologyKey":"none"},"configuration":"operationProfiling:\n mode: slowOp\n slowOpThresholdMs: 100\nsecurity:\n enableEncryption: true\n redactClientLogData: false\nsetParameter:\n ttlMonitorSleepSecs: 60\n wiredTigerConcurrentReadTransactions: 128\n wiredTigerConcurrentWriteTransactions: 128\nstorage:\n engine: wiredTiger\n wiredTiger:\n collectionConfig:\n blockCompressor: snappy\n engineConfig:\n directoryForIndexes: false\n journalCompressor: snappy\n indexConfig:\n prefixCompression: true\n","expose":{"enabled":false,"exposeType":"ClusterIP"},"resources":{"limits":{"cpu":"500m","memory":"1G"},"requests":{"cpu":"100m","memory":"0.1G"}},"size":3,"volumeSpec":{"persistentVolumeClaim":{"resources":{"requests":{"storage":"3Gi"}}}}},"enabled":true,"mongos":{"affinity":{"antiAffinityTopologyKey":"none"},"expose":{"exposeType":"LoadBalancer"},"resources":{"limits":{"cpu":"500m","memory":"1G"},"requests":{"cpu":"100m","memory":"0.1G"}},"size":3}},"upgradeOptions":{"apply":"Never"}}} percona.com/resync-pbm: "true" creationTimestamp: "2023-10-05T19:16:13Z" generation: 2 name: some-name namespace: demand-backup-physical-sharded-6860 resourceVersion: "26925" uid: 42c790e2-c70a-4f39-8fbb-1ddb01cc66f6 spec: backup: enabled: true image: perconalab/percona-server-mongodb-operator:main-backup storages: aws-s3: s3: bucket: operator-testing credentialsSecret: aws-s3-secret insecureSkipTLSVerify: false prefix: psmdb-demand-backup-physical-sharded region: us-east-1 type: s3 azure-blob: azure: container: operator-testing credentialsSecret: azure-secret prefix: psmdb-demand-backup-physical-sharded type: azure gcp-cs: s3: bucket: operator-testing credentialsSecret: gcp-cs-secret endpointUrl: https://storage.googleapis.com insecureSkipTLSVerify: false prefix: psmdb-demand-backup-physical-sharded region: us-east-1 type: s3 minio: s3: bucket: operator-testing credentialsSecret: minio-secret endpointUrl: http://minio-service:9000/ insecureSkipTLSVerify: false region: us-east-1 type: s3 tasks: - compressionType: gzip enabled: true name: weekly schedule: 0 0 * * 0 storageName: aws-s3 crVersion: 1.15.0 image: perconalab/percona-server-mongodb-operator:main-mongod5.0 imagePullPolicy: Always replsets: - affinity: antiAffinityTopologyKey: none configuration: | operationProfiling: mode: slowOp slowOpThresholdMs: 100 security: enableEncryption: true redactClientLogData: false setParameter: ttlMonitorSleepSecs: 60 wiredTigerConcurrentReadTransactions: 128 wiredTigerConcurrentWriteTransactions: 128 storage: engine: wiredTiger wiredTiger: collectionConfig: blockCompressor: snappy engineConfig: directoryForIndexes: false journalCompressor: snappy indexConfig: prefixCompression: true expose: enabled: false exposeType: ClusterIP name: rs0 resources: limits: cpu: 500m memory: 1G requests: cpu: 100m memory: 0.1G size: 3 volumeSpec: persistentVolumeClaim: 
resources: requests: storage: 3Gi secrets: users: some-users sharding: configsvrReplSet: affinity: antiAffinityTopologyKey: none configuration: | operationProfiling: mode: slowOp slowOpThresholdMs: 100 security: enableEncryption: true redactClientLogData: false setParameter: ttlMonitorSleepSecs: 60 wiredTigerConcurrentReadTransactions: 128 wiredTigerConcurrentWriteTransactions: 128 storage: engine: wiredTiger wiredTiger: collectionConfig: blockCompressor: snappy engineConfig: directoryForIndexes: false journalCompressor: snappy indexConfig: prefixCompression: true expose: enabled: false exposeType: ClusterIP resources: limits: cpu: 500m memory: 1G requests: cpu: 100m memory: 0.1G size: 3 volumeSpec: persistentVolumeClaim: resources: requests: storage: 3Gi enabled: true mongos: affinity: antiAffinityTopologyKey: none expose: exposeType: LoadBalancer resources: limits: cpu: 500m memory: 1G requests: cpu: 100m memory: 0.1G size: 3 upgradeOptions: apply: Never status: conditions: - lastTransitionTime: "2023-10-05T19:18:55Z" status: "True" type: ready - lastTransitionTime: "2023-10-05T19:23:22Z" status: "True" type: initializing - lastTransitionTime: "2023-10-05T19:24:05Z" message: 'rs0: ready' reason: RSReady status: "True" type: ready - lastTransitionTime: "2023-10-05T19:24:05Z" status: "True" type: initializing - lastTransitionTime: "2023-10-05T19:24:41Z" message: 'rs0: ready' reason: RSReady status: "True" type: ready - lastTransitionTime: "2023-10-05T19:24:41Z" status: "True" type: initializing - lastTransitionTime: "2023-10-05T19:25:17Z" message: 'rs0: ready' reason: RSReady status: "True" type: ready - lastTransitionTime: "2023-10-05T19:25:17Z" status: "True" type: initializing - lastTransitionTime: "2023-10-05T19:38:00Z" message: 'cfg: ready' reason: RSReady status: "True" type: ready - lastTransitionTime: "2023-10-05T19:38:00Z" status: "True" type: initializing - lastTransitionTime: "2023-10-05T19:38:24Z" message: 'rs0: ready' reason: RSReady status: "True" type: ready - lastTransitionTime: "2023-10-05T19:38:24Z" status: "True" type: initializing - lastTransitionTime: "2023-10-05T19:38:38Z" reason: MongosReady status: "True" type: ready - lastTransitionTime: "2023-10-05T19:38:52Z" status: "True" type: initializing - lastTransitionTime: "2023-10-05T19:39:35Z" message: 'rs0: ready' reason: RSReady status: "True" type: ready - lastTransitionTime: "2023-10-05T19:39:35Z" status: "True" type: initializing - lastTransitionTime: "2023-10-05T19:40:12Z" message: 'rs0: ready' reason: RSReady status: "True" type: ready - lastTransitionTime: "2023-10-05T19:40:12Z" status: "True" type: initializing - lastTransitionTime: "2023-10-05T19:43:11Z" message: 'rs0: ready' reason: RSReady status: "True" type: ready - lastTransitionTime: "2023-10-05T19:43:11Z" status: "True" type: initializing host: 34.29.171.73 mongoImage: perconalab/percona-server-mongodb-operator:main-mongod5.0 mongoVersion: 5.0.20-17 mongos: ready: 0 size: 0 status: initializing observedGeneration: 2 ready: 0 replsets: cfg: initialized: true ready: 0 size: 3 status: initializing rs0: added_as_shard: true initialized: true ready: 0 size: 3 status: initializing size: 6 state: initializing + cat /tmp/tmp.UtikTctLJx + rm /tmp/tmp.jYS1ftxaMB /tmp/tmp.UtikTctLJx + return 0 ++ kubectl_bin get psmdb some-name -o yaml ++ yq '.metadata.annotations."percona.com/resync-pbm"' +++ mktemp ++ local LAST_OUT=/tmp/tmp.O23pSzTE77 +++ mktemp ++ local LAST_ERR=/tmp/tmp.7hUk8Ju0nn ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' 
++ set +e ++ kubectl get psmdb some-name -o yaml ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.O23pSzTE77 ++ cat /tmp/tmp.7hUk8Ju0nn ++ rm /tmp/tmp.O23pSzTE77 /tmp/tmp.7hUk8Ju0nn ++ return 0 + '[' true == null ']' + echo + wait_cluster_consistency some-name + local cluster_name=some-name + local wait_time=32 + retry=0 + sleep 7 + echo -n 'waiting for cluster readyness' waiting for cluster readyness++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.jSBzjSbmJe +++ mktemp ++ local LAST_ERR=/tmp/tmp.wpMJ3D2mog ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.jSBzjSbmJe ++ cat /tmp/tmp.wpMJ3D2mog ++ rm /tmp/tmp.jSBzjSbmJe /tmp/tmp.wpMJ3D2mog ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 1 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.nknPcGEInt +++ mktemp ++ local LAST_ERR=/tmp/tmp.KTfZ3t5QeE ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.nknPcGEInt ++ cat /tmp/tmp.KTfZ3t5QeE ++ rm /tmp/tmp.nknPcGEInt /tmp/tmp.KTfZ3t5QeE ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 2 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.blDwI49HJO +++ mktemp ++ local LAST_ERR=/tmp/tmp.5n31pWrP7P ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.blDwI49HJO ++ cat /tmp/tmp.5n31pWrP7P ++ rm /tmp/tmp.blDwI49HJO /tmp/tmp.5n31pWrP7P ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 3 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.q6rT06pa0K +++ mktemp ++ local LAST_ERR=/tmp/tmp.6C1lwWgKQu ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.q6rT06pa0K ++ cat /tmp/tmp.6C1lwWgKQu ++ rm /tmp/tmp.q6rT06pa0K /tmp/tmp.6C1lwWgKQu ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 4 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.3jARqLTqVL +++ mktemp ++ local LAST_ERR=/tmp/tmp.GaVPf2QB41 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.3jARqLTqVL ++ cat /tmp/tmp.GaVPf2QB41 ++ rm /tmp/tmp.3jARqLTqVL /tmp/tmp.GaVPf2QB41 ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 5 -ge 32 ']' + echo -n . 
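Before each of these waits, compare_kubectl ran the long yq filter shown earlier to normalize the live StatefulSet before diffing it against the golden file: environment-specific metadata, images, IPs, hashes, and node ports are deleted, and the test namespace is rewritten to NAME_SPACE. Conceptually, with only a few representative deletions rather than the full filter, and with expected_result/new_result assumed to be set as in the trace:

kubectl get -o yaml statefulset/some-name-rs0 \
    | yq eval '
        del(.metadata.managedFields) |
        del(.metadata.resourceVersion) |
        del(.. | select(has("uid")).uid) |
        del(.status) |
        (.. | select(tag == "!!str")) |= sub("demand-backup-physical-sharded-6860", "NAME_SPACE")
      ' - > "$new_result"
diff -u "$expected_result" "$new_result"        # any drift from the golden file fails the test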
.+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.WNYB2OAnSl +++ mktemp ++ local LAST_ERR=/tmp/tmp.PDPfqfjJlX ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.WNYB2OAnSl ++ cat /tmp/tmp.PDPfqfjJlX ++ rm /tmp/tmp.WNYB2OAnSl /tmp/tmp.PDPfqfjJlX ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 6 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.oi5Xckyf33 +++ mktemp ++ local LAST_ERR=/tmp/tmp.416MgYDdCL ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.oi5Xckyf33 ++ cat /tmp/tmp.416MgYDdCL ++ rm /tmp/tmp.oi5Xckyf33 /tmp/tmp.416MgYDdCL ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 7 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.5pW4cYdNmJ +++ mktemp ++ local LAST_ERR=/tmp/tmp.xDLFIJ1mfS ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.5pW4cYdNmJ ++ cat /tmp/tmp.xDLFIJ1mfS ++ rm /tmp/tmp.5pW4cYdNmJ /tmp/tmp.xDLFIJ1mfS ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 8 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.zi3BMy10tk +++ mktemp ++ local LAST_ERR=/tmp/tmp.l0GaNemtZi ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.zi3BMy10tk ++ cat /tmp/tmp.l0GaNemtZi ++ rm /tmp/tmp.zi3BMy10tk /tmp/tmp.l0GaNemtZi ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 9 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.KGuxO1Wl5e +++ mktemp ++ local LAST_ERR=/tmp/tmp.2VmMRqu0KW ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.KGuxO1Wl5e ++ cat /tmp/tmp.2VmMRqu0KW ++ rm /tmp/tmp.KGuxO1Wl5e /tmp/tmp.2VmMRqu0KW ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 10 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.wLwtM5dMsr +++ mktemp ++ local LAST_ERR=/tmp/tmp.imkfYMvpTt ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.wLwtM5dMsr ++ cat /tmp/tmp.imkfYMvpTt ++ rm /tmp/tmp.wLwtM5dMsr /tmp/tmp.imkfYMvpTt ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 11 -ge 32 ']' + echo -n . 
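The find and drop commands issued around these waits go through run_mongos, which locates the psmdb-client pod and pipes the shell commands into mongo inside it. A sketch inferred from the printf pipeline in the trace, with quoting simplified (commands containing single quotes would need the escaping the real helper uses):

run_mongos() {
    local command=$1
    local uri=$2
    local driver=${3:-mongodb}
    local suffix=.svc.cluster.local
    local client_container
    client_container=$(kubectl get pods --selector=name=psmdb-client \
        -o 'jsonpath={.items[].metadata.name}')
    kubectl exec "$client_container" -- bash -c \
        "printf '$command\n' | mongo $driver://$uri$suffix/admin"
}

# e.g. run_mongos 'use myApp\n db.test.find()' myApp:myPass@some-name-mongos.demand-backup-physical-sharded-6860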
.+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.EVPjo4KjLl +++ mktemp ++ local LAST_ERR=/tmp/tmp.TpJF8Nm5Ca ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.EVPjo4KjLl ++ cat /tmp/tmp.TpJF8Nm5Ca ++ rm /tmp/tmp.EVPjo4KjLl /tmp/tmp.TpJF8Nm5Ca ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 12 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.rGethddFdw +++ mktemp ++ local LAST_ERR=/tmp/tmp.AlOBzRDvFk ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.rGethddFdw ++ cat /tmp/tmp.AlOBzRDvFk ++ rm /tmp/tmp.rGethddFdw /tmp/tmp.AlOBzRDvFk ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 13 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.rNI0GKn0dJ +++ mktemp ++ local LAST_ERR=/tmp/tmp.yEGts7ML5V ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.rNI0GKn0dJ ++ cat /tmp/tmp.yEGts7ML5V ++ rm /tmp/tmp.rNI0GKn0dJ /tmp/tmp.yEGts7ML5V ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 14 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.2zRIZUn3mh +++ mktemp ++ local LAST_ERR=/tmp/tmp.swcLucVBVj ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.2zRIZUn3mh ++ cat /tmp/tmp.swcLucVBVj ++ rm /tmp/tmp.2zRIZUn3mh /tmp/tmp.swcLucVBVj ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 15 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.fEAB6uOzRL +++ mktemp ++ local LAST_ERR=/tmp/tmp.NKlj1riJCT ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.fEAB6uOzRL ++ cat /tmp/tmp.NKlj1riJCT ++ rm /tmp/tmp.fEAB6uOzRL /tmp/tmp.NKlj1riJCT ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 16 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.7n2MWZVTP2 +++ mktemp ++ local LAST_ERR=/tmp/tmp.F2FE2lmFuR ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.7n2MWZVTP2 ++ cat /tmp/tmp.F2FE2lmFuR ++ rm /tmp/tmp.7n2MWZVTP2 /tmp/tmp.F2FE2lmFuR ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 17 -ge 32 ']' + echo -n . 
.+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.jXzNd4jmH6 +++ mktemp ++ local LAST_ERR=/tmp/tmp.JJ0pMTXl3G ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.jXzNd4jmH6 ++ cat /tmp/tmp.JJ0pMTXl3G ++ rm /tmp/tmp.jXzNd4jmH6 /tmp/tmp.JJ0pMTXl3G ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 18 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ebXjY9ZatH +++ mktemp ++ local LAST_ERR=/tmp/tmp.WqHlvhsJEV ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.ebXjY9ZatH ++ cat /tmp/tmp.WqHlvhsJEV ++ rm /tmp/tmp.ebXjY9ZatH /tmp/tmp.WqHlvhsJEV ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 19 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.32buA5Ujds +++ mktemp ++ local LAST_ERR=/tmp/tmp.jTdiyxjGfL ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.32buA5Ujds ++ cat /tmp/tmp.jTdiyxjGfL ++ rm /tmp/tmp.32buA5Ujds /tmp/tmp.jTdiyxjGfL ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 20 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.IouAcjloHY +++ mktemp ++ local LAST_ERR=/tmp/tmp.PqBMJYO9eX ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.IouAcjloHY ++ cat /tmp/tmp.PqBMJYO9eX ++ rm /tmp/tmp.IouAcjloHY /tmp/tmp.PqBMJYO9eX ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 21 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Gz6OM0rIip +++ mktemp ++ local LAST_ERR=/tmp/tmp.u6mL1R4LLK ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Gz6OM0rIip ++ cat /tmp/tmp.u6mL1R4LLK ++ rm /tmp/tmp.Gz6OM0rIip /tmp/tmp.u6mL1R4LLK ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 22 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.zXpaueg7Wh +++ mktemp ++ local LAST_ERR=/tmp/tmp.ICa0pZQPPK ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.zXpaueg7Wh ++ cat /tmp/tmp.ICa0pZQPPK ++ rm /tmp/tmp.zXpaueg7Wh /tmp/tmp.ICa0pZQPPK ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 23 -ge 32 ']' + echo -n . 
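Each storage backend's check begins with run_restore (gcp-cs above; azure-blob goes through the same path right after this wait), which stamps the restore name and backup name into the shared manifest and applies it. A sketch of the sed pipeline from the trace; test_dir is assumed to point at e2e-tests/demand-backup-physical-sharded:

run_restore() {
    local backup_name=$1
    cat "$test_dir/conf/restore.yml" \
        | sed -e "s/name:/name: restore-${backup_name}/" \
        | sed -e "s/backupName:/backupName: ${backup_name}/" \
        | kubectl apply -f -
}

The two substitutions cannot clash because sed is case-sensitive: 's/name:/.../' does not match the capital N in 'backupName:'.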
.+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.2hT7yC4yUu +++ mktemp ++ local LAST_ERR=/tmp/tmp.UBpjbaHaBS ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.2hT7yC4yUu ++ cat /tmp/tmp.UBpjbaHaBS ++ rm /tmp/tmp.2hT7yC4yUu /tmp/tmp.UBpjbaHaBS ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 24 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.n8WcuPxVTH +++ mktemp ++ local LAST_ERR=/tmp/tmp.27QNc14Xlq ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.n8WcuPxVTH ++ cat /tmp/tmp.27QNc14Xlq ++ rm /tmp/tmp.n8WcuPxVTH /tmp/tmp.27QNc14Xlq ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 25 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.sguTFB7cxw +++ mktemp ++ local LAST_ERR=/tmp/tmp.1rWZCrFtZQ ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.sguTFB7cxw ++ cat /tmp/tmp.1rWZCrFtZQ ++ rm /tmp/tmp.sguTFB7cxw /tmp/tmp.1rWZCrFtZQ ++ return 0 + [[ ready == \r\e\a\d\y ]] + compare_mongos_cmd find myApp:myPass@some-name-mongos.demand-backup-physical-sharded-6860 -sharded + local command=find + local uri=myApp:myPass@some-name-mongos.demand-backup-physical-sharded-6860 + local postfix=-sharded + local suffix= + local database=myApp + local collection=test + run_mongos 'use myApp\n db.test.find()' myApp:myPass@some-name-mongos.demand-backup-physical-sharded-6860 mongodb '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-mongos.demand-backup-physical-sharded-6860 + local driver=mongodb + egrep -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match' + local suffix=.svc.cluster.local + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.TKNgXxR7rc +++ mktemp ++ local LAST_ERR=/tmp/tmp.ZbDHRnIoDz ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.TKNgXxR7rc ++ cat /tmp/tmp.ZbDHRnIoDz ++ rm /tmp/tmp.TKNgXxR7rc /tmp/tmp.ZbDHRnIoDz ++ return 0 + local client_container=psmdb-client-577b7bcc6c-rxlrk + local mongo_flag= + kubectl_bin exec psmdb-client-577b7bcc6c-rxlrk -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.demand-backup-physical-sharded-6860.svc.cluster.local/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.1nzLdhNBXR ++ mktemp + local LAST_ERR=/tmp/tmp.dJS2xn89nx + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec 
psmdb-client-577b7bcc6c-rxlrk -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.demand-backup-physical-sharded-6860.svc.cluster.local/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.1nzLdhNBXR + cat /tmp/tmp.dJS2xn89nx + rm /tmp/tmp.1nzLdhNBXR /tmp/tmp.dJS2xn89nx + return 0 + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1352/e2e-tests/demand-backup-physical-sharded/compare/find-sharded.json /tmp/tmp.0TFKOgWJzU/find-sharded + echo + set -o xtrace + check_exported_mongos_service_endpoint 34.29.171.73 + local host=34.29.171.73 ++ kubectl_bin get psmdb some-name '-o=jsonpath={.status.host}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Iex57Kzg1z +++ mktemp ++ local LAST_ERR=/tmp/tmp.Sr60ONQgGm ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name '-o=jsonpath={.status.host}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Iex57Kzg1z ++ cat /tmp/tmp.Sr60ONQgGm ++ rm /tmp/tmp.Iex57Kzg1z /tmp/tmp.Sr60ONQgGm ++ return 0 + '[' 34.29.171.73 '!=' 34.29.171.73 ']' + echo 'drop collection' drop collection + run_mongos 'use myApp\n db.test.drop()' myApp:myPass@some-name-mongos.demand-backup-physical-sharded-6860 + local 'command=use myApp\n db.test.drop()' + local uri=myApp:myPass@some-name-mongos.demand-backup-physical-sharded-6860 + local driver=mongodb + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.kWeGKC6EwN +++ mktemp ++ local LAST_ERR=/tmp/tmp.T7R6O1yh9f ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.kWeGKC6EwN ++ cat /tmp/tmp.T7R6O1yh9f ++ rm /tmp/tmp.kWeGKC6EwN /tmp/tmp.T7R6O1yh9f ++ return 0 + local client_container=psmdb-client-577b7bcc6c-rxlrk + local mongo_flag= + kubectl_bin exec psmdb-client-577b7bcc6c-rxlrk -- bash -c 'printf '\''use myApp\n db.test.drop()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.demand-backup-physical-sharded-6860.svc.cluster.local/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.wvYjZ2ts6H ++ mktemp + local LAST_ERR=/tmp/tmp.Wzmbs7dSI7 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-577b7bcc6c-rxlrk -- bash -c 'printf '\''use myApp\n db.test.drop()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.demand-backup-physical-sharded-6860.svc.cluster.local/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.wvYjZ2ts6H Percona Server for MongoDB shell version v4.4.24-23 connecting to: mongodb://some-name-mongos.demand-backup-physical-sharded-6860.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb Implicit session: session { "id" : UUID("1e8edb32-01bb-45a5-82f4-1c2dbca78245") } Percona Server for MongoDB server version: v5.0.20-17 WARNING: shell and server versions do not match switched to db myApp true bye + cat /tmp/tmp.Wzmbs7dSI7 + rm /tmp/tmp.wvYjZ2ts6H /tmp/tmp.Wzmbs7dSI7 + return 0 + echo 'check backup and restore -- azure-blob' check backup and restore -- azure-blob + run_restore backup-azure-blob-sharded _restore_sharded + local backup_name=backup-azure-blob-sharded + cat 
/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1352/e2e-tests/demand-backup-physical-sharded/conf/restore.yml + /usr/bin/sed -e 's/name:/name: restore-backup-azure-blob-sharded/' + /usr/bin/sed -e 's/backupName:/backupName: backup-azure-blob-sharded/' + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.KTVyqIHDZ5 ++ mktemp + local LAST_ERR=/tmp/tmp.0M7c1gJ0nV + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.KTVyqIHDZ5 perconaservermongodbrestore.psmdb.percona.com/restore-backup-azure-blob-sharded created + cat /tmp/tmp.0M7c1gJ0nV + rm /tmp/tmp.KTVyqIHDZ5 /tmp/tmp.0M7c1gJ0nV + return 0 + run_recovery_check backup-azure-blob-sharded _restore_sharded + local backup_name=backup-azure-blob-sharded + local compare_suffix=_restore_sharded + wait_restore backup-azure-blob-sharded some-name requested 0 420 + local backup_name=backup-azure-blob-sharded + local cluster_name=some-name + local target_state=requested + local wait_cluster_consistency=0 + local wait_time=420 + set +o xtrace waiting psmdb-restore/backup-azure-blob-sharded to reach requested state........................................................................................................................................... + '[' 0 -eq 1 ']' + echo + compare_kubectl statefulset/some-name-rs0 _restore_sharded + local resource=statefulset/some-name-rs0 + local postfix=_restore_sharded + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1352/e2e-tests/demand-backup-physical-sharded/compare/statefulset_some-name-rs0_restore_sharded.yml + local new_result=/tmp/tmp.0TFKOgWJzU/statefulset_some-name-rs0.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1352/e2e-tests/demand-backup-physical-sharded/compare/statefulset_some-name-rs0_restore_sharded-oc.yml ']' + kubectl_bin get -o yaml statefulset/some-name-rs0 + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. 
| select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("demand-backup-physical-sharded-6860", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. == "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.25F2VyM829 ++ mktemp + local LAST_ERR=/tmp/tmp.zrlaStgDCv + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get -o yaml statefulset/some-name-rs0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.25F2VyM829 + cat /tmp/tmp.zrlaStgDCv + rm /tmp/tmp.25F2VyM829 /tmp/tmp.zrlaStgDCv + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.0TFKOgWJzU/statefulset_some-name-rs0.yml + version_gt 1.22 ++ echo '1.24 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.0TFKOgWJzU/statefulset_some-name-rs0.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.0TFKOgWJzU/statefulset_some-name-rs0.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1352/e2e-tests/demand-backup-physical-sharded/compare/statefulset_some-name-rs0_restore_sharded.yml == */cronjob* ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1352/e2e-tests/demand-backup-physical-sharded/compare/statefulset_some-name-rs0_restore_sharded.yml /tmp/tmp.0TFKOgWJzU/statefulset_some-name-rs0.yml + wait_restore backup-azure-blob-sharded some-name ready 0 900 + local backup_name=backup-azure-blob-sharded + local cluster_name=some-name + local target_state=ready + local wait_cluster_consistency=0 + local wait_time=900 + set +o xtrace waiting psmdb-restore/backup-azure-blob-sharded to reach ready state................................................................................... 
+ '[' 0 -eq 1 ']' + kubectl_bin get psmdb some-name -o yaml ++ mktemp + local LAST_OUT=/tmp/tmp.2OMYcwKv0z ++ mktemp + local LAST_ERR=/tmp/tmp.Y7WR3OwSIe + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get psmdb some-name -o yaml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.2OMYcwKv0z apiVersion: psmdb.percona.com/v1 kind: PerconaServerMongoDB metadata: annotations: kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"psmdb.percona.com/v1","kind":"PerconaServerMongoDB","metadata":{"annotations":{},"name":"some-name","namespace":"demand-backup-physical-sharded-6860"},"spec":{"backup":{"enabled":true,"image":"perconalab/percona-server-mongodb-operator:main-backup","storages":{"aws-s3":{"s3":{"bucket":"operator-testing","credentialsSecret":"aws-s3-secret","insecureSkipTLSVerify":false,"prefix":"psmdb-demand-backup-physical-sharded","region":"us-east-1"},"type":"s3"},"azure-blob":{"azure":{"container":"operator-testing","credentialsSecret":"azure-secret","prefix":"psmdb-demand-backup-physical-sharded"},"type":"azure"},"gcp-cs":{"s3":{"bucket":"operator-testing","credentialsSecret":"gcp-cs-secret","endpointUrl":"https://storage.googleapis.com","insecureSkipTLSVerify":false,"prefix":"psmdb-demand-backup-physical-sharded","region":"us-east-1"},"type":"s3"},"minio":{"s3":{"bucket":"operator-testing","credentialsSecret":"minio-secret","endpointUrl":"http://minio-service:9000/","insecureSkipTLSVerify":false,"region":"us-east-1"},"type":"s3"}},"tasks":[{"compressionType":"gzip","enabled":true,"name":"weekly","schedule":"0 0 * * 0","storageName":"aws-s3"}]},"image":"perconalab/percona-server-mongodb-operator:main-mongod5.0","imagePullPolicy":"Always","replsets":[{"affinity":{"antiAffinityTopologyKey":"none"},"configuration":"operationProfiling:\n mode: slowOp\n slowOpThresholdMs: 100\nsecurity:\n enableEncryption: true\n redactClientLogData: false\nsetParameter:\n ttlMonitorSleepSecs: 60\n wiredTigerConcurrentReadTransactions: 128\n wiredTigerConcurrentWriteTransactions: 128\nstorage:\n engine: wiredTiger\n wiredTiger:\n collectionConfig:\n blockCompressor: snappy\n engineConfig:\n directoryForIndexes: false\n journalCompressor: snappy\n indexConfig:\n prefixCompression: true\n","expose":{"enabled":false,"exposeType":"ClusterIP"},"name":"rs0","resources":{"limits":{"cpu":"500m","memory":"1G"},"requests":{"cpu":"100m","memory":"0.1G"}},"size":3,"volumeSpec":{"persistentVolumeClaim":{"resources":{"requests":{"storage":"3Gi"}}}}}],"secrets":{"users":"some-users"},"sharding":{"configsvrReplSet":{"affinity":{"antiAffinityTopologyKey":"none"},"configuration":"operationProfiling:\n mode: slowOp\n slowOpThresholdMs: 100\nsecurity:\n enableEncryption: true\n redactClientLogData: false\nsetParameter:\n ttlMonitorSleepSecs: 60\n wiredTigerConcurrentReadTransactions: 128\n wiredTigerConcurrentWriteTransactions: 128\nstorage:\n engine: wiredTiger\n wiredTiger:\n collectionConfig:\n blockCompressor: snappy\n engineConfig:\n directoryForIndexes: false\n journalCompressor: snappy\n indexConfig:\n prefixCompression: 
true\n","expose":{"enabled":false,"exposeType":"ClusterIP"},"resources":{"limits":{"cpu":"500m","memory":"1G"},"requests":{"cpu":"100m","memory":"0.1G"}},"size":3,"volumeSpec":{"persistentVolumeClaim":{"resources":{"requests":{"storage":"3Gi"}}}}},"enabled":true,"mongos":{"affinity":{"antiAffinityTopologyKey":"none"},"expose":{"exposeType":"LoadBalancer"},"resources":{"limits":{"cpu":"500m","memory":"1G"},"requests":{"cpu":"100m","memory":"0.1G"}},"size":3}},"upgradeOptions":{"apply":"Never"}}} percona.com/resync-pbm: "true" creationTimestamp: "2023-10-05T19:16:13Z" generation: 2 name: some-name namespace: demand-backup-physical-sharded-6860 resourceVersion: "33404" uid: 42c790e2-c70a-4f39-8fbb-1ddb01cc66f6 spec: backup: enabled: true image: perconalab/percona-server-mongodb-operator:main-backup storages: aws-s3: s3: bucket: operator-testing credentialsSecret: aws-s3-secret insecureSkipTLSVerify: false prefix: psmdb-demand-backup-physical-sharded region: us-east-1 type: s3 azure-blob: azure: container: operator-testing credentialsSecret: azure-secret prefix: psmdb-demand-backup-physical-sharded type: azure gcp-cs: s3: bucket: operator-testing credentialsSecret: gcp-cs-secret endpointUrl: https://storage.googleapis.com insecureSkipTLSVerify: false prefix: psmdb-demand-backup-physical-sharded region: us-east-1 type: s3 minio: s3: bucket: operator-testing credentialsSecret: minio-secret endpointUrl: http://minio-service:9000/ insecureSkipTLSVerify: false region: us-east-1 type: s3 tasks: - compressionType: gzip enabled: true name: weekly schedule: 0 0 * * 0 storageName: aws-s3 crVersion: 1.15.0 image: perconalab/percona-server-mongodb-operator:main-mongod5.0 imagePullPolicy: Always replsets: - affinity: antiAffinityTopologyKey: none configuration: | operationProfiling: mode: slowOp slowOpThresholdMs: 100 security: enableEncryption: true redactClientLogData: false setParameter: ttlMonitorSleepSecs: 60 wiredTigerConcurrentReadTransactions: 128 wiredTigerConcurrentWriteTransactions: 128 storage: engine: wiredTiger wiredTiger: collectionConfig: blockCompressor: snappy engineConfig: directoryForIndexes: false journalCompressor: snappy indexConfig: prefixCompression: true expose: enabled: false exposeType: ClusterIP name: rs0 resources: limits: cpu: 500m memory: 1G requests: cpu: 100m memory: 0.1G size: 3 volumeSpec: persistentVolumeClaim: resources: requests: storage: 3Gi secrets: users: some-users sharding: configsvrReplSet: affinity: antiAffinityTopologyKey: none configuration: | operationProfiling: mode: slowOp slowOpThresholdMs: 100 security: enableEncryption: true redactClientLogData: false setParameter: ttlMonitorSleepSecs: 60 wiredTigerConcurrentReadTransactions: 128 wiredTigerConcurrentWriteTransactions: 128 storage: engine: wiredTiger wiredTiger: collectionConfig: blockCompressor: snappy engineConfig: directoryForIndexes: false journalCompressor: snappy indexConfig: prefixCompression: true expose: enabled: false exposeType: ClusterIP resources: limits: cpu: 500m memory: 1G requests: cpu: 100m memory: 0.1G size: 3 volumeSpec: persistentVolumeClaim: resources: requests: storage: 3Gi enabled: true mongos: affinity: antiAffinityTopologyKey: none expose: exposeType: LoadBalancer resources: limits: cpu: 500m memory: 1G requests: cpu: 100m memory: 0.1G size: 3 upgradeOptions: apply: Never status: conditions: - lastTransitionTime: "2023-10-05T19:38:38Z" reason: MongosReady status: "True" type: ready - lastTransitionTime: "2023-10-05T19:38:52Z" status: "True" type: initializing - 
lastTransitionTime: "2023-10-05T19:39:35Z" message: 'rs0: ready' reason: RSReady status: "True" type: ready - lastTransitionTime: "2023-10-05T19:39:35Z" status: "True" type: initializing - lastTransitionTime: "2023-10-05T19:40:12Z" message: 'rs0: ready' reason: RSReady status: "True" type: ready - lastTransitionTime: "2023-10-05T19:40:12Z" status: "True" type: initializing - lastTransitionTime: "2023-10-05T19:43:11Z" message: 'rs0: ready' reason: RSReady status: "True" type: ready - lastTransitionTime: "2023-10-05T19:43:11Z" status: "True" type: initializing - lastTransitionTime: "2023-10-05T19:55:10Z" message: 'cfg: ready' reason: RSReady status: "True" type: ready - lastTransitionTime: "2023-10-05T19:55:10Z" status: "True" type: initializing - lastTransitionTime: "2023-10-05T19:55:41Z" message: 'rs0: ready' reason: RSReady status: "True" type: ready - lastTransitionTime: "2023-10-05T19:55:41Z" status: "True" type: initializing - lastTransitionTime: "2023-10-05T19:55:49Z" reason: MongosReady status: "True" type: ready - lastTransitionTime: "2023-10-05T19:56:04Z" status: "True" type: initializing - lastTransitionTime: "2023-10-05T19:56:47Z" message: 'rs0: ready' reason: RSReady status: "True" type: ready - lastTransitionTime: "2023-10-05T19:56:47Z" status: "True" type: initializing - lastTransitionTime: "2023-10-05T19:57:23Z" message: 'rs0: ready' reason: RSReady status: "True" type: ready - lastTransitionTime: "2023-10-05T19:57:23Z" status: "True" type: initializing - lastTransitionTime: "2023-10-05T19:58:00Z" message: 'rs0: ready' reason: RSReady status: "True" type: ready - lastTransitionTime: "2023-10-05T19:58:00Z" status: "True" type: initializing host: 34.29.171.73 mongoImage: perconalab/percona-server-mongodb-operator:main-mongod5.0 mongoVersion: 5.0.20-17 mongos: ready: 0 size: 0 status: initializing observedGeneration: 2 ready: 0 replsets: cfg: initialized: true ready: 0 size: 3 status: initializing rs0: added_as_shard: true initialized: true ready: 0 size: 3 status: initializing size: 6 state: initializing + cat /tmp/tmp.Y7WR3OwSIe + rm /tmp/tmp.2OMYcwKv0z /tmp/tmp.Y7WR3OwSIe + return 0 ++ kubectl_bin get psmdb some-name -o yaml ++ yq '.metadata.annotations."percona.com/resync-pbm"' +++ mktemp ++ local LAST_OUT=/tmp/tmp.nJJheI3Or8 +++ mktemp ++ local LAST_ERR=/tmp/tmp.4Sdf4Y2mWC ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o yaml ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.nJJheI3Or8 ++ cat /tmp/tmp.4Sdf4Y2mWC ++ rm /tmp/tmp.nJJheI3Or8 /tmp/tmp.4Sdf4Y2mWC ++ return 0 + '[' true == null ']' + echo + wait_cluster_consistency some-name + local cluster_name=some-name + local wait_time=32 + retry=0 + sleep 7 + echo -n 'waiting for cluster readyness' waiting for cluster readyness++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.UuGpdELif9 +++ mktemp ++ local LAST_ERR=/tmp/tmp.tleLbcT6Na ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.UuGpdELif9 ++ cat /tmp/tmp.tleLbcT6Na ++ rm /tmp/tmp.UuGpdELif9 /tmp/tmp.tleLbcT6Na ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 1 -ge 32 ']' + echo -n . 
.
[editor's note: retries 2 through 17 of the readiness probe are elided here — each pass repeats the block above verbatim: sleep 10, re-read .status.state through kubectl_bin (it keeps returning "initializing"), bump the retry counter, confirm it is still below 32, and print a "."; only the /tmp/tmp.* capture-file names differ between passes]
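-----------------------------------------------------------------------------------
editor's note: every kubectl call in this trace goes through the suite's kubectl_bin wrapper: capture stdout/stderr into mktemp files, retry up to three times on a non-zero exit (that is what the `seq 0 2` loop implies), replay the captured output, clean up, and propagate the status. A sketch of that pattern — same shape as the trace, not the suite's exact code; the backoff is inferred from the "sleep 0" seen after a first failed attempt:

#!/bin/bash

# Sketch of the capture-and-retry wrapper seen throughout this log.
kubectl_bin() {
  local out err exit_status=0 timeout=4
  out=$(mktemp); err=$(mktemp)
  for i in $(seq 0 2); do                  # up to three attempts
    set +e
    kubectl "$@" > "$out" 2> "$err"
    exit_status=$?
    set -e
    [ "$exit_status" -eq 0 ] && break
    sleep $((timeout * i))                 # backoff inferred, not confirmed
  done
  cat "$out"
  cat "$err" >&2
  rm "$out" "$err"
  return "$exit_status"
}
-----------------------------------------------------------------------------------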
.+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Mwdh6bkmol +++ mktemp ++ local LAST_ERR=/tmp/tmp.UX4HRPbI8q ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Mwdh6bkmol ++ cat /tmp/tmp.UX4HRPbI8q ++ rm /tmp/tmp.Mwdh6bkmol /tmp/tmp.UX4HRPbI8q ++ return 0 + [[ ready == \r\e\a\d\y ]] + compare_mongos_cmd find myApp:myPass@some-name-mongos.demand-backup-physical-sharded-6860 -sharded + local command=find + local uri=myApp:myPass@some-name-mongos.demand-backup-physical-sharded-6860 + local postfix=-sharded + local suffix= + local database=myApp + local collection=test + run_mongos 'use myApp\n db.test.find()' myApp:myPass@some-name-mongos.demand-backup-physical-sharded-6860 mongodb '' + egrep -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-mongos.demand-backup-physical-sharded-6860 + local driver=mongodb + local suffix=.svc.cluster.local + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.MbM13Ha1ly +++ mktemp ++ local LAST_ERR=/tmp/tmp.cJJauepjXT ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.MbM13Ha1ly ++ cat /tmp/tmp.cJJauepjXT ++ rm /tmp/tmp.MbM13Ha1ly /tmp/tmp.cJJauepjXT ++ return 0 + local client_container=psmdb-client-577b7bcc6c-rxlrk + local mongo_flag= + kubectl_bin exec psmdb-client-577b7bcc6c-rxlrk -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.demand-backup-physical-sharded-6860.svc.cluster.local/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.ArCnWDzzc5 ++ mktemp + local LAST_ERR=/tmp/tmp.8vTZYJbrec + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-577b7bcc6c-rxlrk -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.demand-backup-physical-sharded-6860.svc.cluster.local/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.ArCnWDzzc5 + cat /tmp/tmp.8vTZYJbrec + rm /tmp/tmp.ArCnWDzzc5 /tmp/tmp.8vTZYJbrec + return 0 + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1352/e2e-tests/demand-backup-physical-sharded/compare/find-sharded.json /tmp/tmp.0TFKOgWJzU/find-sharded + echo + set -o xtrace + check_exported_mongos_service_endpoint 34.29.171.73 + local host=34.29.171.73 ++ kubectl_bin get psmdb some-name '-o=jsonpath={.status.host}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ju3ZTtuBbP +++ mktemp ++ local LAST_ERR=/tmp/tmp.6yszQlbQgY ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name '-o=jsonpath={.status.host}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.ju3ZTtuBbP ++ cat /tmp/tmp.6yszQlbQgY ++ rm /tmp/tmp.ju3ZTtuBbP /tmp/tmp.6yszQlbQgY ++ return 0 + '[' 34.29.171.73 '!=' 34.29.171.73 ']' + echo 
'drop collection' drop collection + run_mongos 'use myApp\n db.test.drop()' myApp:myPass@some-name-mongos.demand-backup-physical-sharded-6860 + local 'command=use myApp\n db.test.drop()' + local uri=myApp:myPass@some-name-mongos.demand-backup-physical-sharded-6860 + local driver=mongodb + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ewCVTvvFfp +++ mktemp ++ local LAST_ERR=/tmp/tmp.HwuhpDPkJx ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.ewCVTvvFfp ++ cat /tmp/tmp.HwuhpDPkJx ++ rm /tmp/tmp.ewCVTvvFfp /tmp/tmp.HwuhpDPkJx ++ return 0 + local client_container=psmdb-client-577b7bcc6c-rxlrk + local mongo_flag= + kubectl_bin exec psmdb-client-577b7bcc6c-rxlrk -- bash -c 'printf '\''use myApp\n db.test.drop()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.demand-backup-physical-sharded-6860.svc.cluster.local/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.CKG8eTFUwI ++ mktemp + local LAST_ERR=/tmp/tmp.cYAI1Kttcg + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-577b7bcc6c-rxlrk -- bash -c 'printf '\''use myApp\n db.test.drop()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.demand-backup-physical-sharded-6860.svc.cluster.local/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.CKG8eTFUwI Percona Server for MongoDB shell version v4.4.24-23 connecting to: mongodb://some-name-mongos.demand-backup-physical-sharded-6860.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb Implicit session: session { "id" : UUID("5574a903-da14-42b6-9851-a65668461998") } Percona Server for MongoDB server version: v5.0.20-17 WARNING: shell and server versions do not match switched to db myApp true bye + cat /tmp/tmp.cYAI1Kttcg + rm /tmp/tmp.CKG8eTFUwI /tmp/tmp.cYAI1Kttcg + return 0 + echo 'check backup and restore -- minio' check backup and restore -- minio ++ get_backup_dest backup-minio-sharded ++ local backup_name=backup-minio-sharded ++ kubectl_bin get psmdb-backup backup-minio-sharded -o 'jsonpath={.status.destination}' ++ sed -e 's/.json$//' ++ sed 's|s3://||' ++ sed 's|azure://||' +++ mktemp ++ local LAST_OUT=/tmp/tmp.XF0lLwUa49 +++ mktemp ++ local LAST_ERR=/tmp/tmp.f4WfQIfa87 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb-backup backup-minio-sharded -o 'jsonpath={.status.destination}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.XF0lLwUa49 ++ cat /tmp/tmp.f4WfQIfa87 ++ rm /tmp/tmp.XF0lLwUa49 /tmp/tmp.f4WfQIfa87 ++ return 0 + backup_dest_minio=operator-testing/2023-10-05T19:20:44Z + run_restore backup-minio-sharded _restore_sharded + local backup_name=backup-minio-sharded + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1352/e2e-tests/demand-backup-physical-sharded/conf/restore.yml + /usr/bin/sed -e 's/backupName:/backupName: backup-minio-sharded/' + /usr/bin/sed -e 's/name:/name: restore-backup-minio-sharded/' + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.YNZSSYZZBN ++ mktemp + local LAST_ERR=/tmp/tmp.08mnho3ED5 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 
0 -a -n 1 ']' + break + cat /tmp/tmp.YNZSSYZZBN perconaservermongodbrestore.psmdb.percona.com/restore-backup-minio-sharded created + cat /tmp/tmp.08mnho3ED5 + rm /tmp/tmp.YNZSSYZZBN /tmp/tmp.08mnho3ED5 + return 0 + run_recovery_check backup-minio-sharded _restore_sharded + local backup_name=backup-minio-sharded + local compare_suffix=_restore_sharded + wait_restore backup-minio-sharded some-name requested 0 420 + local backup_name=backup-minio-sharded + local cluster_name=some-name + local target_state=requested + local wait_cluster_consistency=0 + local wait_time=420 + set +o xtrace waiting psmdb-restore/backup-minio-sharded to reach requested state......................................................................... + '[' 0 -eq 1 ']' + echo + compare_kubectl statefulset/some-name-rs0 _restore_sharded + local resource=statefulset/some-name-rs0 + local postfix=_restore_sharded + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1352/e2e-tests/demand-backup-physical-sharded/compare/statefulset_some-name-rs0_restore_sharded.yml + local new_result=/tmp/tmp.0TFKOgWJzU/statefulset_some-name-rs0.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1352/e2e-tests/demand-backup-physical-sharded/compare/statefulset_some-name-rs0_restore_sharded-oc.yml ']' + kubectl_bin get -o yaml statefulset/some-name-rs0 + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("demand-backup-physical-sharded-6860", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. 
== "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.1qR50gZOq5 ++ mktemp + local LAST_ERR=/tmp/tmp.Ta79nEdpB4 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get -o yaml statefulset/some-name-rs0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.1qR50gZOq5 + cat /tmp/tmp.Ta79nEdpB4 + rm /tmp/tmp.1qR50gZOq5 /tmp/tmp.Ta79nEdpB4 + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.0TFKOgWJzU/statefulset_some-name-rs0.yml + version_gt 1.22 ++ echo '1.24 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.0TFKOgWJzU/statefulset_some-name-rs0.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.0TFKOgWJzU/statefulset_some-name-rs0.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1352/e2e-tests/demand-backup-physical-sharded/compare/statefulset_some-name-rs0_restore_sharded.yml == */cronjob* ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1352/e2e-tests/demand-backup-physical-sharded/compare/statefulset_some-name-rs0_restore_sharded.yml /tmp/tmp.0TFKOgWJzU/statefulset_some-name-rs0.yml + wait_restore backup-minio-sharded some-name ready 0 900 + local backup_name=backup-minio-sharded + local cluster_name=some-name + local target_state=ready + local wait_cluster_consistency=0 + local wait_time=900 + set +o xtrace waiting psmdb-restore/backup-minio-sharded to reach ready state.................................................................................. + '[' 0 -eq 1 ']' + kubectl_bin get psmdb some-name -o yaml ++ mktemp + local LAST_OUT=/tmp/tmp.sftVmXqFQk ++ mktemp + local LAST_ERR=/tmp/tmp.aNu0v7ZGSZ + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get psmdb some-name -o yaml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.sftVmXqFQk apiVersion: psmdb.percona.com/v1 kind: PerconaServerMongoDB metadata: annotations: kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"psmdb.percona.com/v1","kind":"PerconaServerMongoDB","metadata":{"annotations":{},"name":"some-name","namespace":"demand-backup-physical-sharded-6860"},"spec":{"backup":{"enabled":true,"image":"perconalab/percona-server-mongodb-operator:main-backup","storages":{"aws-s3":{"s3":{"bucket":"operator-testing","credentialsSecret":"aws-s3-secret","insecureSkipTLSVerify":false,"prefix":"psmdb-demand-backup-physical-sharded","region":"us-east-1"},"type":"s3"},"azure-blob":{"azure":{"container":"operator-testing","credentialsSecret":"azure-secret","prefix":"psmdb-demand-backup-physical-sharded"},"type":"azure"},"gcp-cs":{"s3":{"bucket":"operator-testing","credentialsSecret":"gcp-cs-secret","endpointUrl":"https://storage.googleapis.com","insecureSkipTLSVerify":false,"prefix":"psmdb-demand-backup-physical-sharded","region":"us-east-1"},"type":"s3"},"minio":{"s3":{"bucket":"operator-testing","credentialsSecret":"minio-secret","endpointUrl":"http://minio-service:9000/","insecureSkipTLSVerify":false,"region":"us-east-1"},"type":"s3"}},"tasks":[{"compressionType":"gzip","enabled":true,"name":"weekly","schedule":"0 0 * * 0","storageName":"aws-s3"}]},"image":"perconalab/percona-server-mongodb-operator:main-mongod5.0","imagePullPolicy":"Always","replsets":[{"affinity":{"antiAffinityTopologyKey":"none"},"configuration":"operationProfiling:\n mode: slowOp\n slowOpThresholdMs: 100\nsecurity:\n enableEncryption: true\n redactClientLogData: false\nsetParameter:\n 
ttlMonitorSleepSecs: 60\n wiredTigerConcurrentReadTransactions: 128\n wiredTigerConcurrentWriteTransactions: 128\nstorage:\n engine: wiredTiger\n wiredTiger:\n collectionConfig:\n blockCompressor: snappy\n engineConfig:\n directoryForIndexes: false\n journalCompressor: snappy\n indexConfig:\n prefixCompression: true\n","expose":{"enabled":false,"exposeType":"ClusterIP"},"name":"rs0","resources":{"limits":{"cpu":"500m","memory":"1G"},"requests":{"cpu":"100m","memory":"0.1G"}},"size":3,"volumeSpec":{"persistentVolumeClaim":{"resources":{"requests":{"storage":"3Gi"}}}}}],"secrets":{"users":"some-users"},"sharding":{"configsvrReplSet":{"affinity":{"antiAffinityTopologyKey":"none"},"configuration":"operationProfiling:\n mode: slowOp\n slowOpThresholdMs: 100\nsecurity:\n enableEncryption: true\n redactClientLogData: false\nsetParameter:\n ttlMonitorSleepSecs: 60\n wiredTigerConcurrentReadTransactions: 128\n wiredTigerConcurrentWriteTransactions: 128\nstorage:\n engine: wiredTiger\n wiredTiger:\n collectionConfig:\n blockCompressor: snappy\n engineConfig:\n directoryForIndexes: false\n journalCompressor: snappy\n indexConfig:\n prefixCompression: true\n","expose":{"enabled":false,"exposeType":"ClusterIP"},"resources":{"limits":{"cpu":"500m","memory":"1G"},"requests":{"cpu":"100m","memory":"0.1G"}},"size":3,"volumeSpec":{"persistentVolumeClaim":{"resources":{"requests":{"storage":"3Gi"}}}}},"enabled":true,"mongos":{"affinity":{"antiAffinityTopologyKey":"none"},"expose":{"exposeType":"LoadBalancer"},"resources":{"limits":{"cpu":"500m","memory":"1G"},"requests":{"cpu":"100m","memory":"0.1G"}},"size":3}},"upgradeOptions":{"apply":"Never"}}} percona.com/resync-pbm: "true" creationTimestamp: "2023-10-05T19:16:13Z" generation: 2 name: some-name namespace: demand-backup-physical-sharded-6860 resourceVersion: "37799" uid: 42c790e2-c70a-4f39-8fbb-1ddb01cc66f6 spec: backup: enabled: true image: perconalab/percona-server-mongodb-operator:main-backup storages: aws-s3: s3: bucket: operator-testing credentialsSecret: aws-s3-secret insecureSkipTLSVerify: false prefix: psmdb-demand-backup-physical-sharded region: us-east-1 type: s3 azure-blob: azure: container: operator-testing credentialsSecret: azure-secret prefix: psmdb-demand-backup-physical-sharded type: azure gcp-cs: s3: bucket: operator-testing credentialsSecret: gcp-cs-secret endpointUrl: https://storage.googleapis.com insecureSkipTLSVerify: false prefix: psmdb-demand-backup-physical-sharded region: us-east-1 type: s3 minio: s3: bucket: operator-testing credentialsSecret: minio-secret endpointUrl: http://minio-service:9000/ insecureSkipTLSVerify: false region: us-east-1 type: s3 tasks: - compressionType: gzip enabled: true name: weekly schedule: 0 0 * * 0 storageName: aws-s3 crVersion: 1.15.0 image: perconalab/percona-server-mongodb-operator:main-mongod5.0 imagePullPolicy: Always replsets: - affinity: antiAffinityTopologyKey: none configuration: | operationProfiling: mode: slowOp slowOpThresholdMs: 100 security: enableEncryption: true redactClientLogData: false setParameter: ttlMonitorSleepSecs: 60 wiredTigerConcurrentReadTransactions: 128 wiredTigerConcurrentWriteTransactions: 128 storage: engine: wiredTiger wiredTiger: collectionConfig: blockCompressor: snappy engineConfig: directoryForIndexes: false journalCompressor: snappy indexConfig: prefixCompression: true expose: enabled: false exposeType: ClusterIP name: rs0 resources: limits: cpu: 500m memory: 1G requests: cpu: 100m memory: 0.1G size: 3 volumeSpec: persistentVolumeClaim: resources: requests: 
storage: 3Gi secrets: users: some-users sharding: configsvrReplSet: affinity: antiAffinityTopologyKey: none configuration: | operationProfiling: mode: slowOp slowOpThresholdMs: 100 security: enableEncryption: true redactClientLogData: false setParameter: ttlMonitorSleepSecs: 60 wiredTigerConcurrentReadTransactions: 128 wiredTigerConcurrentWriteTransactions: 128 storage: engine: wiredTiger wiredTiger: collectionConfig: blockCompressor: snappy engineConfig: directoryForIndexes: false journalCompressor: snappy indexConfig: prefixCompression: true expose: enabled: false exposeType: ClusterIP resources: limits: cpu: 500m memory: 1G requests: cpu: 100m memory: 0.1G size: 3 volumeSpec: persistentVolumeClaim: resources: requests: storage: 3Gi enabled: true mongos: affinity: antiAffinityTopologyKey: none expose: exposeType: LoadBalancer resources: limits: cpu: 500m memory: 1G requests: cpu: 100m memory: 0.1G size: 3 upgradeOptions: apply: Never status: conditions: - lastTransitionTime: "2023-10-05T19:55:49Z" reason: MongosReady status: "True" type: ready - lastTransitionTime: "2023-10-05T19:56:04Z" status: "True" type: initializing - lastTransitionTime: "2023-10-05T19:56:47Z" message: 'rs0: ready' reason: RSReady status: "True" type: ready - lastTransitionTime: "2023-10-05T19:56:47Z" status: "True" type: initializing - lastTransitionTime: "2023-10-05T19:57:23Z" message: 'rs0: ready' reason: RSReady status: "True" type: ready - lastTransitionTime: "2023-10-05T19:57:23Z" status: "True" type: initializing - lastTransitionTime: "2023-10-05T19:58:00Z" message: 'rs0: ready' reason: RSReady status: "True" type: ready - lastTransitionTime: "2023-10-05T19:58:00Z" status: "True" type: initializing - lastTransitionTime: "2023-10-05T20:04:57Z" message: 'cfg: ready' reason: RSReady status: "True" type: ready - lastTransitionTime: "2023-10-05T20:04:57Z" status: "True" type: initializing - lastTransitionTime: "2023-10-05T20:05:28Z" message: 'rs0: ready' reason: RSReady status: "True" type: ready - lastTransitionTime: "2023-10-05T20:05:28Z" status: "True" type: initializing - lastTransitionTime: "2023-10-05T20:05:41Z" reason: MongosReady status: "True" type: ready - lastTransitionTime: "2023-10-05T20:05:56Z" status: "True" type: initializing - lastTransitionTime: "2023-10-05T20:06:38Z" message: 'rs0: ready' reason: RSReady status: "True" type: ready - lastTransitionTime: "2023-10-05T20:06:38Z" status: "True" type: initializing - lastTransitionTime: "2023-10-05T20:07:15Z" message: 'rs0: ready' reason: RSReady status: "True" type: ready - lastTransitionTime: "2023-10-05T20:07:15Z" status: "True" type: initializing - lastTransitionTime: "2023-10-05T20:07:51Z" message: 'rs0: ready' reason: RSReady status: "True" type: ready - lastTransitionTime: "2023-10-05T20:07:51Z" status: "True" type: initializing host: 34.29.171.73 mongoImage: perconalab/percona-server-mongodb-operator:main-mongod5.0 mongoVersion: 5.0.20-17 mongos: ready: 0 size: 0 status: initializing observedGeneration: 2 ready: 0 replsets: cfg: initialized: true ready: 0 size: 3 status: initializing rs0: added_as_shard: true initialized: true ready: 0 size: 3 status: initializing size: 6 state: initializing + cat /tmp/tmp.aNu0v7ZGSZ + rm /tmp/tmp.sftVmXqFQk /tmp/tmp.aNu0v7ZGSZ + return 0 ++ kubectl_bin get psmdb some-name -o yaml ++ yq '.metadata.annotations."percona.com/resync-pbm"' +++ mktemp ++ local LAST_OUT=/tmp/tmp.jfBgkALNAq +++ mktemp ++ local LAST_ERR=/tmp/tmp.LV46xXH8ZK ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ 
set +e ++ kubectl get psmdb some-name -o yaml ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.jfBgkALNAq ++ cat /tmp/tmp.LV46xXH8ZK ++ rm /tmp/tmp.jfBgkALNAq /tmp/tmp.LV46xXH8ZK ++ return 0 + '[' true == null ']' + echo + wait_cluster_consistency some-name + local cluster_name=some-name + local wait_time=32 + retry=0 + sleep 7 + echo -n 'waiting for cluster readyness' waiting for cluster readyness++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.CbyhrtWCKG +++ mktemp ++ local LAST_ERR=/tmp/tmp.BpFOCFZtg8 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.CbyhrtWCKG ++ cat /tmp/tmp.BpFOCFZtg8 ++ rm /tmp/tmp.CbyhrtWCKG /tmp/tmp.BpFOCFZtg8 ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 1 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.tKKu3yDkPY +++ mktemp ++ local LAST_ERR=/tmp/tmp.9lHnMCIN2p ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.tKKu3yDkPY ++ cat /tmp/tmp.9lHnMCIN2p ++ rm /tmp/tmp.tKKu3yDkPY /tmp/tmp.9lHnMCIN2p ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 2 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.xSs4Gb30Ai +++ mktemp ++ local LAST_ERR=/tmp/tmp.mJG2fg7owy ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.xSs4Gb30Ai ++ cat /tmp/tmp.mJG2fg7owy ++ rm /tmp/tmp.xSs4Gb30Ai /tmp/tmp.mJG2fg7owy ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 3 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.RgzOmOXqrR +++ mktemp ++ local LAST_ERR=/tmp/tmp.zpdtI0WspY ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.RgzOmOXqrR ++ cat /tmp/tmp.zpdtI0WspY ++ rm /tmp/tmp.RgzOmOXqrR /tmp/tmp.zpdtI0WspY ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 4 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.xaSdOxzaoj +++ mktemp ++ local LAST_ERR=/tmp/tmp.fSWp94BLCp ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.xaSdOxzaoj ++ cat /tmp/tmp.fSWp94BLCp ++ rm /tmp/tmp.xaSdOxzaoj /tmp/tmp.fSWp94BLCp ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 5 -ge 32 ']' + echo -n . 
.
[editor's note: retries 6 through 17 are elided here — the probe repeats identically (sleep 10, re-read .status.state, still "initializing", counter below 32, print a ".") with only the /tmp/tmp.* capture-file names changing between passes]
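-----------------------------------------------------------------------------------
editor's note: once the state flips to ready (next in the trace), the suite re-validates the restored data via compare_mongos_cmd: run db.test.find() through the psmdb-client pod, filter shell noise, blank out volatile ObjectId values, and diff against a stored expectation. A condensed sketch of that check (variable names are ours; the URI, egrep list, and sed expression are taken from the trace):

#!/bin/bash
set -euo pipefail

NS=demand-backup-physical-sharded-6860    # this run's test namespace
CLIENT=$(kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}')

# Run the query through mongos, then normalize the output before diffing.
kubectl exec "$CLIENT" -- bash -c \
    "printf 'use myApp\n db.test.find()\n' | mongo mongodb://myApp:myPass@some-name-mongos.$NS.svc.cluster.local/admin" \
  | egrep -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Implicit session:|versions do not match' \
  | sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' \
  > /tmp/find-sharded

diff compare/find-sharded.json /tmp/find-sharded
-----------------------------------------------------------------------------------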
.+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.oqI2REjmOG +++ mktemp ++ local LAST_ERR=/tmp/tmp.yKuLdELouU ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.oqI2REjmOG ++ cat /tmp/tmp.yKuLdELouU ++ rm /tmp/tmp.oqI2REjmOG /tmp/tmp.yKuLdELouU ++ return 0 + [[ ready == \r\e\a\d\y ]] + compare_mongos_cmd find myApp:myPass@some-name-mongos.demand-backup-physical-sharded-6860 -sharded + local command=find + local uri=myApp:myPass@some-name-mongos.demand-backup-physical-sharded-6860 + local postfix=-sharded + local suffix= + local database=myApp + local collection=test + run_mongos 'use myApp\n db.test.find()' myApp:myPass@some-name-mongos.demand-backup-physical-sharded-6860 mongodb '' + egrep -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-mongos.demand-backup-physical-sharded-6860 + local driver=mongodb + local suffix=.svc.cluster.local + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.iA7nAud1aX +++ mktemp ++ local LAST_ERR=/tmp/tmp.2fi8KagLjX ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.iA7nAud1aX ++ cat /tmp/tmp.2fi8KagLjX ++ rm /tmp/tmp.iA7nAud1aX /tmp/tmp.2fi8KagLjX ++ return 0 + local client_container=psmdb-client-577b7bcc6c-rxlrk + local mongo_flag= + kubectl_bin exec psmdb-client-577b7bcc6c-rxlrk -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.demand-backup-physical-sharded-6860.svc.cluster.local/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.418pBo2rvz ++ mktemp + local LAST_ERR=/tmp/tmp.Oc0m5PO2qy + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-577b7bcc6c-rxlrk -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.demand-backup-physical-sharded-6860.svc.cluster.local/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.418pBo2rvz + cat /tmp/tmp.Oc0m5PO2qy + rm /tmp/tmp.418pBo2rvz /tmp/tmp.Oc0m5PO2qy + return 0 + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1352/e2e-tests/demand-backup-physical-sharded/compare/find-sharded.json /tmp/tmp.0TFKOgWJzU/find-sharded + echo + set -o xtrace + destroy demand-backup-physical-sharded-6860 + local namespace=demand-backup-physical-sharded-6860 + local ignore_logs=true + desc 'destroy cluster/operator and all other resources' + set +o xtrace ----------------------------------------------------------------------------------- destroy cluster/operator and all other resources ----------------------------------------------------------------------------------- + '[' true == false ']' + delete_crd + desc 'get and delete old CRDs and RBAC' + set +o xtrace ----------------------------------------------------------------------------------- get and delete old CRDs 
and RBAC ----------------------------------------------------------------------------------- + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1352/deploy/crd.yaml --ignore-not-found --wait=false ++ mktemp + local LAST_OUT=/tmp/tmp.7ti2DRsunO ++ mktemp + local LAST_ERR=/tmp/tmp.gaPIguc98g + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1352/deploy/crd.yaml --ignore-not-found --wait=false + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.7ti2DRsunO customresourcedefinition.apiextensions.k8s.io "perconaservermongodbbackups.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbrestores.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbs.psmdb.percona.com" deleted + cat /tmp/tmp.gaPIguc98g + rm /tmp/tmp.7ti2DRsunO /tmp/tmp.gaPIguc98g + return 0 ++ yq eval .metadata.name /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1352/deploy/crd.yaml ++ grep -v '\-\-\-' + for crd_name in '$(yq eval '\''.metadata.name'\'' "${src_dir}/deploy/crd.yaml" | grep -v '\''\-\-\-'\'')' + grep -v NAMESPACE + kubectl get perconaservermongodbbackups.psmdb.percona.com --all-namespaces -o wide + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbbackups.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' + kubectl patch perconaservermongodbbackups.psmdb.percona.com -n demand-backup-physical-sharded-6860 backup-minio-sharded --type=merge -p '{"metadata":{"finalizers":[]}}' perconaservermongodbbackup.psmdb.percona.com/backup-minio-sharded patched + kubectl_bin wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.Y2w0kmXkn7 ++ mktemp + local LAST_ERR=/tmp/tmp.BkjsQA5EwE + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Y2w0kmXkn7 customresourcedefinition.apiextensions.k8s.io/perconaservermongodbbackups.psmdb.percona.com condition met + cat /tmp/tmp.BkjsQA5EwE + rm /tmp/tmp.Y2w0kmXkn7 /tmp/tmp.BkjsQA5EwE + return 0 + for crd_name in '$(yq eval '\''.metadata.name'\'' "${src_dir}/deploy/crd.yaml" | grep -v '\''\-\-\-'\'')' + kubectl get perconaservermongodbrestores.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbrestores.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbrestores" + kubectl patch perconaservermongodbrestores.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbrestores" + : + kubectl_bin wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.b7bIrrMcTt ++ mktemp + local LAST_ERR=/tmp/tmp.kt0O2r59Vl + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.b7bIrrMcTt + cat /tmp/tmp.kt0O2r59Vl + rm /tmp/tmp.b7bIrrMcTt /tmp/tmp.kt0O2r59Vl + return 0 + for crd_name in '$(yq eval '\''.metadata.name'\'' "${src_dir}/deploy/crd.yaml" | grep -v 
'\''\-\-\-'\'')' + kubectl get perconaservermongodbs.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbs.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbs" + kubectl patch perconaservermongodbs.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbs" + : + kubectl_bin wait --for=delete crd perconaservermongodbs.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.GOTwLPONXY ++ mktemp + local LAST_ERR=/tmp/tmp.VyR3nj6jaH + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=delete crd perconaservermongodbs.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.GOTwLPONXY + cat /tmp/tmp.VyR3nj6jaH + rm /tmp/tmp.GOTwLPONXY /tmp/tmp.VyR3nj6jaH + return 0 + local rbac_yaml=rbac.yaml + '[' -n '' ']' + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1352/deploy/rbac.yaml --ignore-not-found ++ mktemp + local LAST_OUT=/tmp/tmp.40LO6QaPwl ++ mktemp + local LAST_ERR=/tmp/tmp.b0FQgOJvpq + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1352/deploy/rbac.yaml --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.40LO6QaPwl role.rbac.authorization.k8s.io "percona-server-mongodb-operator" deleted serviceaccount "percona-server-mongodb-operator" deleted rolebinding.rbac.authorization.k8s.io "service-account-percona-server-mongodb-operator" deleted + cat /tmp/tmp.b0FQgOJvpq + rm /tmp/tmp.40LO6QaPwl /tmp/tmp.b0FQgOJvpq + return 0 + kubectl_bin delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.12.4/cert-manager.yaml + : + '[' -n '' ']' + '[' -n '' ']' + rm -rf /tmp/tmp.0TFKOgWJzU + kubectl_bin delete --grace-period=0 --force=true namespace demand-backup-physical-sharded-6860 ++ mktemp + local LAST_OUT=/tmp/tmp.jlIIsqGSeY ++ mktemp + local LAST_ERR=/tmp/tmp.YbiXAq7lap + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete --grace-period=0 --force=true namespace demand-backup-physical-sharded-6860
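-----------------------------------------------------------------------------------
editor's note (end of log): teardown follows a fixed pattern — delete the CRDs without waiting, strip finalizers from any surviving custom resources so the deletes cannot hang, wait for each CRD to disappear, remove RBAC, then force-delete the namespace. The harmless "-n sh" patch errors above occur whenever a resource type is already gone: xargs still runs the command once on the empty listing, so $0 falls back to sh's default. A condensed sketch under the CRD names seen in this log:

#!/bin/bash
NAMESPACE=demand-backup-physical-sharded-6860

for crd in perconaservermongodbbackups.psmdb.percona.com \
           perconaservermongodbrestores.psmdb.percona.com \
           perconaservermongodbs.psmdb.percona.com; do
  # Clear finalizers on every remaining CR so CRD deletion cannot hang.
  kubectl get "$crd" --all-namespaces -o wide | grep -v NAMESPACE \
    | xargs -L 1 sh -xc \
        "kubectl patch $crd -n \$0 \$1 --type=merge -p '{\"metadata\":{\"finalizers\":[]}}'" \
    || true
  kubectl wait --for=delete crd "$crd" || true
done

kubectl delete -f deploy/rbac.yaml --ignore-not-found
kubectl delete --grace-period=0 --force=true namespace "$NAMESPACE"
-----------------------------------------------------------------------------------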