Log: /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1563/e2e-tests/logs/demand-backup-physical-sharded.log
WARNING: version difference between client (1.30) and server (1.26) exceeds the supported minor version skew of +/-1
WARNING: version difference between client (1.30) and server (1.26) exceeds the supported minor version skew of +/-1
WARNING: version difference between client (1.30) and server (1.26) exceeds the supported minor version skew of +/-1
+ create_infra demand-backup-physical-sharded-17205
+ local ns=demand-backup-physical-sharded-17205
+ delete_crd
+ desc 'get and delete old CRDs and RBAC'
+ set +o xtrace
-----------------------------------------------------------------------------------
get and delete old CRDs and RBAC
-----------------------------------------------------------------------------------
+ kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1563/deploy/crd.yaml --ignore-not-found --wait=false
++ mktemp
+ local LAST_OUT=/tmp/tmp.pSZ2OycW5c
++ mktemp
+ local LAST_ERR=/tmp/tmp.AGVVc1bF8M
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1563/deploy/crd.yaml --ignore-not-found --wait=false
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.pSZ2OycW5c
customresourcedefinition.apiextensions.k8s.io "perconaservermongodbbackups.psmdb.percona.com" deleted
customresourcedefinition.apiextensions.k8s.io "perconaservermongodbrestores.psmdb.percona.com" deleted
customresourcedefinition.apiextensions.k8s.io "perconaservermongodbs.psmdb.percona.com" deleted
+ cat /tmp/tmp.AGVVc1bF8M
+ rm /tmp/tmp.pSZ2OycW5c /tmp/tmp.AGVVc1bF8M
+ return 0
++ yq eval .metadata.name /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1563/deploy/crd.yaml
++ grep -v '\-\-\-'
+ for crd_name in '$(yq eval '\''.metadata.name'\'' "${src_dir}/deploy/crd.yaml" | grep -v '\''\-\-\-'\'')'
+ kubectl get perconaservermongodbbackups.psmdb.percona.com --all-namespaces -o wide
+ grep -v NAMESPACE
+ xargs -L 1 sh -xc 'kubectl patch perconaservermongodbbackups.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"'
+ kubectl patch perconaservermongodbbackups.psmdb.percona.com -n demand-backup-physical-sharded-12735 backup-aws-s3-sharded --type=merge -p '{"metadata":{"finalizers":[]}}'
perconaservermongodbbackup.psmdb.percona.com/backup-aws-s3-sharded patched
+ kubectl patch perconaservermongodbbackups.psmdb.percona.com -n demand-backup-physical-sharded-12735 backup-azure-blob-sharded --type=merge -p '{"metadata":{"finalizers":[]}}'
perconaservermongodbbackup.psmdb.percona.com/backup-azure-blob-sharded patched
+ kubectl patch perconaservermongodbbackups.psmdb.percona.com -n demand-backup-physical-sharded-12735 backup-gcp-cs-sharded --type=merge -p '{"metadata":{"finalizers":[]}}'
perconaservermongodbbackup.psmdb.percona.com/backup-gcp-cs-sharded patched
+ kubectl patch perconaservermongodbbackups.psmdb.percona.com -n demand-backup-physical-sharded-12735 backup-minio-sharded --type=merge -p '{"metadata":{"finalizers":[]}}'
Error from server (NotFound): perconaservermongodbbackups.psmdb.percona.com "backup-minio-sharded" not found
+ :
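The patch loop above is the standard unblock-CRD-deletion idiom: leftover custom resources still carry operator-managed finalizers, and with the operator itself being torn down nothing would ever clear them, so the cleanup empties metadata.finalizers by hand before waiting for the CRD to go away. A minimal sketch of the pattern (helper name hypothetical):

    strip_finalizers() {
        # $1 = full CRD name, e.g. perconaservermongodbbackups.psmdb.percona.com
        local crd="$1"
        # `kubectl get -o wide` prints "NAMESPACE NAME ..."; xargs -L 1 hands one
        # line at a time to sh -xc, where $0 becomes the namespace and $1 the name.
        kubectl get "$crd" --all-namespaces -o wide \
            | grep -v NAMESPACE \
            | xargs -L 1 sh -xc \
                "kubectl patch $crd -n \$0 \$1 --type=merge -p '{\"metadata\":{\"finalizers\":[]}}'" \
            || :  # best-effort: a NotFound (as with backup-minio-sharded above) is tolerated
    }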
+ kubectl_bin wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com
++ mktemp
+ local LAST_OUT=/tmp/tmp.EhOT6pmE3a
++ mktemp
+ local LAST_ERR=/tmp/tmp.J7sqS7funO
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.EhOT6pmE3a
customresourcedefinition.apiextensions.k8s.io/perconaservermongodbbackups.psmdb.percona.com condition met
+ cat /tmp/tmp.J7sqS7funO
+ rm /tmp/tmp.EhOT6pmE3a /tmp/tmp.J7sqS7funO
+ return 0
+ for crd_name in '$(yq eval '\''.metadata.name'\'' "${src_dir}/deploy/crd.yaml" | grep -v '\''\-\-\-'\'')'
+ grep -v NAMESPACE
+ xargs -L 1 sh -xc 'kubectl patch perconaservermongodbrestores.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"'
+ kubectl get perconaservermongodbrestores.psmdb.percona.com --all-namespaces -o wide
error: the server doesn't have a resource type "perconaservermongodbrestores"
+ kubectl patch perconaservermongodbrestores.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}'
error: the server doesn't have a resource type "perconaservermongodbrestores"
+ :
+ kubectl_bin wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com
++ mktemp
+ local LAST_OUT=/tmp/tmp.ur81eXwETT
++ mktemp
+ local LAST_ERR=/tmp/tmp.m9qkwVXGTN
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.ur81eXwETT
+ cat /tmp/tmp.m9qkwVXGTN
+ rm /tmp/tmp.ur81eXwETT /tmp/tmp.m9qkwVXGTN
+ return 0
+ for crd_name in '$(yq eval '\''.metadata.name'\'' "${src_dir}/deploy/crd.yaml" | grep -v '\''\-\-\-'\'')'
+ grep -v NAMESPACE
+ kubectl get perconaservermongodbs.psmdb.percona.com --all-namespaces -o wide
+ xargs -L 1 sh -xc 'kubectl patch perconaservermongodbs.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"'
error: the server doesn't have a resource type "perconaservermongodbs"
+ kubectl patch perconaservermongodbs.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}'
error: the server doesn't have a resource type "perconaservermongodbs"
+ :
+ kubectl_bin wait --for=delete crd perconaservermongodbs.psmdb.percona.com
++ mktemp
+ local LAST_OUT=/tmp/tmp.uEwNdTADD3
++ mktemp
+ local LAST_ERR=/tmp/tmp.ONdWGrZPNO
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl wait --for=delete crd perconaservermongodbs.psmdb.percona.com
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.uEwNdTADD3
+ cat /tmp/tmp.ONdWGrZPNO
+ rm /tmp/tmp.uEwNdTADD3 /tmp/tmp.ONdWGrZPNO
+ return 0
+ local rbac_yaml=rbac.yaml
+ '[' -n psmdb-operator ']'
+ rbac_yaml=cw-rbac.yaml
+ kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1563/deploy/cw-rbac.yaml --ignore-not-found
++ mktemp
+ local LAST_OUT=/tmp/tmp.aE6QtXMuHh
++ mktemp
+ local LAST_ERR=/tmp/tmp.L2OsP44Mfp
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1563/deploy/cw-rbac.yaml --ignore-not-found
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.aE6QtXMuHh
clusterrole.rbac.authorization.k8s.io "percona-server-mongodb-operator" deleted
clusterrolebinding.rbac.authorization.k8s.io "service-account-percona-server-mongodb-operator" deleted
+ cat /tmp/tmp.L2OsP44Mfp
+ rm /tmp/tmp.aE6QtXMuHh /tmp/tmp.L2OsP44Mfp
+ return 0
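Nearly every command in this log runs through the suite's kubectl_bin wrapper, whose trace is the recurring mktemp / seq 0 2 / set +e boilerplate. Reconstructed (hedged) from what the trace shows: three attempts, a 0s/4s/8s back-off between failures, and both output streams captured to temp files and replayed; the real helper in the e2e-tests sources may differ in detail:

    kubectl_bin() {
        local LAST_OUT LAST_ERR exit_status=0 timeout=4
        LAST_OUT=$(mktemp)
        LAST_ERR=$(mktemp)
        for i in $(seq 0 2); do
            set +e
            kubectl "$@" >"$LAST_OUT" 2>"$LAST_ERR"
            exit_status=$?
            set -e
            if [ "$exit_status" != 0 ]; then
                cat "$LAST_OUT"
                cat "$LAST_ERR" >&2
                sleep $((timeout * i))   # observed in the trace: sleep 0, 4, 8
            else
                break
            fi
        done
        cat "$LAST_OUT"
        cat "$LAST_ERR" >&2
        rm "$LAST_OUT" "$LAST_ERR"
        return $exit_status
    }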
+ check_crd_for_deletion PR-1563-2831d13a
+ local git_tag=PR-1563-2831d13a
++ /usr/bin/sed s/---//g
++ /usr/bin/sed ':a;N;$!ba;s/\n/ /g'
++ yq eval .metadata.name
++ curl -s https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/PR-1563-2831d13a/deploy/crd.yaml
+ for crd_name in '$(curl -s https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/${git_tag}/deploy/crd.yaml | yq eval '\''.metadata.name'\'' | $sed '\''s/---//g'\'' | $sed '\'':a;N;$!ba;s/\n/ /g'\'')'
++ kubectl_bin get crd/null -o 'jsonpath={.status.conditions[-1].type}'
+++ mktemp
++ local LAST_OUT=/tmp/tmp.mvEoBWQuuA
+++ mktemp
++ local LAST_ERR=/tmp/tmp.zKssrxauXD
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in '$(seq 0 2)'
++ set +e
++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}'
++ exit_status=1
++ set -e
++ '[' 1 '!=' 0 -a -n 1 ']'
++ cat /tmp/tmp.mvEoBWQuuA
++ cat /tmp/tmp.zKssrxauXD
Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found
++ sleep 0
++ for i in '$(seq 0 2)'
++ set +e
++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}'
++ exit_status=1
++ set -e
++ '[' 1 '!=' 0 -a -n 1 ']'
++ cat /tmp/tmp.mvEoBWQuuA
++ cat /tmp/tmp.zKssrxauXD
Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found
++ sleep 4
++ for i in '$(seq 0 2)'
++ set +e
++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}'
++ exit_status=1
++ set -e
++ '[' 1 '!=' 0 -a -n 1 ']'
++ cat /tmp/tmp.mvEoBWQuuA
++ cat /tmp/tmp.zKssrxauXD
Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found
++ sleep 8
++ cat /tmp/tmp.mvEoBWQuuA
++ cat /tmp/tmp.zKssrxauXD
Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found
++ rm /tmp/tmp.mvEoBWQuuA /tmp/tmp.zKssrxauXD
++ return 1
+ [[ '' == \T\e\r\m\i\n\a\t\i\n\g ]]
+ '[' -n psmdb-operator ']'
+ create_namespace psmdb-operator
+ local namespace=psmdb-operator
+ local skip_clean_namespace=
+ [[ 1 == 1 ]]
+ [[ -z '' ]]
+ destroy_chaos_mesh
++ tail -n1
++ sed s/NAMESPACE//
++ helm list --all-namespaces --filter chaos-mesh
++ awk '-F ' '{print $2}'
+ local chaos_mesh_ns=
+ desc 'destroy chaos-mesh'
+ set +o xtrace
-----------------------------------------------------------------------------------
destroy chaos-mesh
-----------------------------------------------------------------------------------
+ '[' -n '' ']'
++ grep chaos-mesh
++ kubectl get MutatingWebhookConfiguration
++ awk '{print $1}'
+ timeout 30 kubectl delete MutatingWebhookConfiguration
error: resource(s) were provided, but no name was specified
+ :
++ kubectl get ValidatingWebhookConfiguration
++ grep chaos-mesh
++ awk '{print $1}'
+ timeout 30 kubectl delete ValidatingWebhookConfiguration
error: resource(s) were provided, but no name was specified
+ :
++ grep validate-auth
++ kubectl get ValidatingWebhookConfiguration
++ awk '{print $1}'
+ timeout 30 kubectl delete ValidatingWebhookConfiguration
error: resource(s) were provided, but no name was specified
+ :
++ grep chaos-mesh
++ awk '{print $1}'
++ kubectl api-resources
++ awk '{print $1}'
++ kubectl get crd
++ grep chaos-mesh.org
+ timeout 30 kubectl delete crd
error: resource(s) were provided, but no name was specified
+ :
++ kubectl get clusterrolebinding
++ grep chaos-mesh
++ awk '{print $1}'
+ timeout 30 kubectl delete clusterrolebinding
error: resource(s) were provided, but no name was specified
+ :
++ grep chaos-mesh
++ kubectl get clusterrole
++ awk '{print $1}'
+ timeout 30 kubectl delete clusterrole
error: resource(s) were provided, but no name was specified
+ :
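Two quirks are visible in the block above. The crd/null lookups happen because `yq eval .metadata.name` emits the literal string null for any document in the multi-doc crd.yaml that lacks the field, and check_crd_for_deletion simply lets the NotFound branch absorb it. Likewise, every `timeout 30 kubectl delete <kind>` fires with an empty name list on a cluster that never had chaos-mesh, producing the repeated "no name was specified" errors that `|| :` swallows. A quieter variant (helper name hypothetical) would skip the call entirely on empty input via xargs -r:

    delete_matching() {
        local kind="$1" pattern="$2"
        # xargs -r runs nothing when grep finds no matches, avoiding the
        # "resource(s) were provided, but no name was specified" noise.
        kubectl get "$kind" 2>/dev/null \
            | grep "$pattern" \
            | awk '{print $1}' \
            | xargs -r timeout 30 kubectl delete "$kind" || :
    }
    delete_matching MutatingWebhookConfiguration chaos-mesh
    delete_matching ValidatingWebhookConfiguration chaos-mesh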
+ desc 'cleaned up all old namespaces'
+ set +o xtrace
-----------------------------------------------------------------------------------
cleaned up all old namespaces
-----------------------------------------------------------------------------------
+ '[' -n '' ']'
+ desc 'cleaned up old namespaces psmdb-operator'
+ set +o xtrace
-----------------------------------------------------------------------------------
cleaned up old namespaces psmdb-operator
-----------------------------------------------------------------------------------
+ kubectl_bin delete namespace psmdb-operator --ignore-not-found
+ awk '{print$1}'
+ xargs kubectl delete ns
+ egrep -v '^kube-|^default|Terminating|psmdb-operator|openshift|gke-mcs|^NAME'
++ mktemp
+ kubectl_bin get ns
+ local LAST_OUT=/tmp/tmp.DiuV9GhrOe
++ mktemp
++ mktemp
+ local LAST_ERR=/tmp/tmp.LnunW4sbRL
+ local exit_status=0
+ local timeout=4
+ local LAST_OUT=/tmp/tmp.bFqlITgXYv
++ seq 0 2
++ mktemp
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl delete namespace psmdb-operator --ignore-not-found
+ local LAST_ERR=/tmp/tmp.T7EKxmmjIO
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl get ns
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.bFqlITgXYv
+ cat /tmp/tmp.T7EKxmmjIO
+ rm /tmp/tmp.bFqlITgXYv /tmp/tmp.T7EKxmmjIO
+ return 0
namespace "demand-backup-physical-sharded-12735" deleted
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.DiuV9GhrOe
namespace "psmdb-operator" deleted
+ cat /tmp/tmp.LnunW4sbRL
+ rm /tmp/tmp.DiuV9GhrOe /tmp/tmp.LnunW4sbRL
+ return 0
+ kubectl_bin wait --for=delete namespace psmdb-operator
++ mktemp
+ local LAST_OUT=/tmp/tmp.YFYQGZmgWi
++ mktemp
+ local LAST_ERR=/tmp/tmp.RGMSQpRQWH
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl wait --for=delete namespace psmdb-operator
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.YFYQGZmgWi
+ cat /tmp/tmp.RGMSQpRQWH
+ rm /tmp/tmp.YFYQGZmgWi /tmp/tmp.RGMSQpRQWH
+ return 0
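The interleaved trace above is two kubectl_bin invocations running side by side: an explicit delete of the target namespace, and a sweep that filters `kubectl get ns` through an egrep deny-list of system and operator namespaces and feeds the survivors to `xargs kubectl delete ns`. Condensed, the sweep is:

    kubectl get ns \
        | egrep -v '^kube-|^default|Terminating|psmdb-operator|openshift|gke-mcs|^NAME' \
        | awk '{print $1}' \
        | xargs kubectl delete ns   # xargs -r would avoid the empty-list error seen
                                    # below when nothing is left to sweep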
+ desc 'create namespace psmdb-operator'
+ set +o xtrace
-----------------------------------------------------------------------------------
create namespace psmdb-operator
-----------------------------------------------------------------------------------
+ kubectl_bin create namespace psmdb-operator
++ mktemp
+ local LAST_OUT=/tmp/tmp.ncn1H41hTK
++ mktemp
+ local LAST_ERR=/tmp/tmp.hzdPpBnkkK
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl create namespace psmdb-operator
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.ncn1H41hTK
namespace/psmdb-operator created
+ cat /tmp/tmp.hzdPpBnkkK
+ rm /tmp/tmp.ncn1H41hTK /tmp/tmp.hzdPpBnkkK
+ return 0
++ kubectl_bin config current-context
+++ mktemp
++ local LAST_OUT=/tmp/tmp.LEKK2gKmx3
+++ mktemp
++ local LAST_ERR=/tmp/tmp.SRY9NQ0O4X
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in '$(seq 0 2)'
++ set +e
++ kubectl config current-context
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 -a -n 1 ']'
++ break
++ cat /tmp/tmp.LEKK2gKmx3
++ cat /tmp/tmp.SRY9NQ0O4X
++ rm /tmp/tmp.LEKK2gKmx3 /tmp/tmp.SRY9NQ0O4X
++ return 0
+ kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-1563-2831d13a-2-cluster8 --namespace=psmdb-operator
++ mktemp
+ local LAST_OUT=/tmp/tmp.kkythHTgdw
++ mktemp
+ local LAST_ERR=/tmp/tmp.8AfNYCFpmd
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-1563-2831d13a-2-cluster8 --namespace=psmdb-operator
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.kkythHTgdw
Context "gke_cloud-dev-112233_us-central1-a_jen-psmdb-1563-2831d13a-2-cluster8" modified.
+ cat /tmp/tmp.8AfNYCFpmd
+ rm /tmp/tmp.kkythHTgdw /tmp/tmp.8AfNYCFpmd
+ return 0
+ deploy_operator
+ desc 'start PSMDB operator'
+ set +o xtrace
-----------------------------------------------------------------------------------
start PSMDB operator
-----------------------------------------------------------------------------------
+ local cr_file
+ '[' -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1563/e2e-tests/demand-backup-physical-sharded/conf/crd.yaml ']'
+ cr_file=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1563/deploy/crd.yaml
+ kubectl_bin apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1563/deploy/crd.yaml
++ mktemp
+ local LAST_OUT=/tmp/tmp.0rD5jSysll
++ mktemp
+ local LAST_ERR=/tmp/tmp.JHiCFWU02S
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1563/deploy/crd.yaml
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.0rD5jSysll
customresourcedefinition.apiextensions.k8s.io/perconaservermongodbbackups.psmdb.percona.com serverside-applied
customresourcedefinition.apiextensions.k8s.io/perconaservermongodbrestores.psmdb.percona.com serverside-applied
customresourcedefinition.apiextensions.k8s.io/perconaservermongodbs.psmdb.percona.com serverside-applied
+ cat /tmp/tmp.JHiCFWU02S
+ rm /tmp/tmp.0rD5jSysll /tmp/tmp.JHiCFWU02S
+ return 0
+ '[' -n psmdb-operator ']'
+ apply_rbac cw-rbac
+ local operator_namespace=psmdb-operator
+ local rbac=cw-rbac
+ cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1563/deploy/cw-rbac.yaml
+ kubectl_bin apply -n psmdb-operator -f -
++ mktemp
+ local LAST_OUT=/tmp/tmp.GSWyDqZd96
+ sed -e 's^namespace: .*^namespace: psmdb-operator^'
++ mktemp
+ local LAST_ERR=/tmp/tmp.MHPnFkySOo
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl apply -n psmdb-operator -f -
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.GSWyDqZd96
clusterrole.rbac.authorization.k8s.io/percona-server-mongodb-operator created
serviceaccount/percona-server-mongodb-operator created
clusterrolebinding.rbac.authorization.k8s.io/service-account-percona-server-mongodb-operator created
+ cat /tmp/tmp.MHPnFkySOo
+ rm /tmp/tmp.GSWyDqZd96 /tmp/tmp.MHPnFkySOo
+ return 0
+ kubectl_bin apply -f -
+ yq eval ' (.spec.template.spec.containers[].image = "perconalab/percona-server-mongodb-operator:PR-1563-2831d13a") | ((.. | select(.[] == "DISABLE_TELEMETRY")) |= .value="true") | ((.. | select(.[] == "LOG_LEVEL")) |= .value="DEBUG")' /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1563/deploy/cw-operator.yaml
++ mktemp
+ local LAST_OUT=/tmp/tmp.CMwMVBEQz5
++ mktemp
+ local LAST_ERR=/tmp/tmp.26Q2dt2kOz
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl apply -f -
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.CMwMVBEQz5
deployment.apps/percona-server-mongodb-operator created
+ cat /tmp/tmp.26Q2dt2kOz
+ rm /tmp/tmp.CMwMVBEQz5 /tmp/tmp.26Q2dt2kOz
+ return 0
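Collapsed from the interleaved trace, the operator rollout above is a single rewrite-and-apply pipeline: yq pins the deployment image to the PR build and flips two env values before kubectl ever sees the manifest (paths assume the repo checkout as working directory):

    yq eval '
      (.spec.template.spec.containers[].image = "perconalab/percona-server-mongodb-operator:PR-1563-2831d13a")
      | ((.. | select(.[] == "DISABLE_TELEMETRY")) |= .value="true")
      | ((.. | select(.[] == "LOG_LEVEL")) |= .value="DEBUG")
    ' deploy/cw-operator.yaml | kubectl apply -f -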
+ sleep 2
++ get_operator_pod
++ kubectl_bin get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator
+++ mktemp
++ local LAST_OUT=/tmp/tmp.JhkWfHNIuI
+++ mktemp
++ local LAST_ERR=/tmp/tmp.IuW43fPWuE
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in '$(seq 0 2)'
++ set +e
++ kubectl get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 -a -n 1 ']'
++ break
++ cat /tmp/tmp.JhkWfHNIuI
++ cat /tmp/tmp.IuW43fPWuE
++ rm /tmp/tmp.JhkWfHNIuI /tmp/tmp.IuW43fPWuE
++ return 0
+ wait_pod percona-server-mongodb-operator-5db9f57b69-kxz75
+ local pod=percona-server-mongodb-operator-5db9f57b69-kxz75
+ set +o xtrace
waiting for pod/percona-server-mongodb-operator-5db9f57b69-kxz75 to be ready.OK
+ create_namespace demand-backup-physical-sharded-17205
+ local namespace=demand-backup-physical-sharded-17205
+ local skip_clean_namespace=
+ [[ 1 == 1 ]]
+ [[ -z '' ]]
+ destroy_chaos_mesh
++ helm list --all-namespaces --filter chaos-mesh
++ sed s/NAMESPACE//
++ awk '-F ' '{print $2}'
++ tail -n1
+ local chaos_mesh_ns=
+ desc 'destroy chaos-mesh'
+ set +o xtrace
-----------------------------------------------------------------------------------
destroy chaos-mesh
-----------------------------------------------------------------------------------
+ '[' -n '' ']'
++ kubectl get MutatingWebhookConfiguration
++ grep chaos-mesh
++ awk '{print $1}'
+ timeout 30 kubectl delete MutatingWebhookConfiguration
error: resource(s) were provided, but no name was specified
+ :
++ awk '{print $1}'
++ grep chaos-mesh
++ kubectl get ValidatingWebhookConfiguration
+ timeout 30 kubectl delete ValidatingWebhookConfiguration
error: resource(s) were provided, but no name was specified
+ :
++ kubectl get ValidatingWebhookConfiguration
++ grep validate-auth
++ awk '{print $1}'
+ timeout 30 kubectl delete ValidatingWebhookConfiguration
error: resource(s) were provided, but no name was specified
+ :
++ kubectl api-resources
++ grep chaos-mesh
++ awk '{print $1}'
++ kubectl get crd
++ grep chaos-mesh.org
++ awk '{print $1}'
+ timeout 30 kubectl delete crd
error: resource(s) were provided, but no name was specified
+ :
++ kubectl get clusterrolebinding
++ awk '{print $1}'
++ grep chaos-mesh
+ timeout 30 kubectl delete clusterrolebinding
error: resource(s) were provided, but no name was specified
+ :
++ kubectl get clusterrole
++ awk '{print $1}'
++ grep chaos-mesh
+ timeout 30 kubectl delete clusterrole
error: resource(s) were provided, but no name was specified
+ :
+ desc 'cleaned up all old namespaces'
+ set +o xtrace
-----------------------------------------------------------------------------------
cleaned up all old namespaces
-----------------------------------------------------------------------------------
+ '[' -n '' ']'
+ desc 'cleaned up old namespaces demand-backup-physical-sharded-17205'
+ set +o xtrace
-----------------------------------------------------------------------------------
cleaned up old namespaces demand-backup-physical-sharded-17205
-----------------------------------------------------------------------------------
+ kubectl_bin delete namespace demand-backup-physical-sharded-17205 --ignore-not-found
+ xargs kubectl delete ns
++ mktemp
+ awk '{print$1}'
+ kubectl_bin get ns
+ egrep -v '^kube-|^default|Terminating|psmdb-operator|openshift|gke-mcs|^NAME'
+ local LAST_OUT=/tmp/tmp.exZGRWTR9l
++ mktemp
++ mktemp
+ local LAST_ERR=/tmp/tmp.1EKxH6Noi2
+ local exit_status=0
+ local timeout=4
+ local LAST_OUT=/tmp/tmp.Uoqpc0awPw
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl delete namespace demand-backup-physical-sharded-17205 --ignore-not-found
++ mktemp
+ local LAST_ERR=/tmp/tmp.v5Vyn0fTRv
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl get ns
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.Uoqpc0awPw
+ cat /tmp/tmp.v5Vyn0fTRv
+ rm /tmp/tmp.Uoqpc0awPw /tmp/tmp.v5Vyn0fTRv
+ return 0
error: resource(s) were provided, but no name was specified
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.exZGRWTR9l
+ cat /tmp/tmp.1EKxH6Noi2
+ rm /tmp/tmp.exZGRWTR9l /tmp/tmp.1EKxH6Noi2
+ return 0
+ kubectl_bin wait --for=delete namespace demand-backup-physical-sharded-17205
++ mktemp
+ local LAST_OUT=/tmp/tmp.A4zRIBUOLj
++ mktemp
+ local LAST_ERR=/tmp/tmp.zvRhEpVec1
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl wait --for=delete namespace demand-backup-physical-sharded-17205
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.A4zRIBUOLj
+ cat /tmp/tmp.zvRhEpVec1
+ rm /tmp/tmp.A4zRIBUOLj /tmp/tmp.zvRhEpVec1
+ return 0
+ desc 'create namespace demand-backup-physical-sharded-17205'
+ set +o xtrace
-----------------------------------------------------------------------------------
create namespace demand-backup-physical-sharded-17205
-----------------------------------------------------------------------------------
+ kubectl_bin create namespace demand-backup-physical-sharded-17205
++ mktemp
+ local LAST_OUT=/tmp/tmp.bp9MHCb4Wj
++ mktemp
+ local LAST_ERR=/tmp/tmp.MvcB2EicIy
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl create namespace demand-backup-physical-sharded-17205
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.bp9MHCb4Wj
namespace/demand-backup-physical-sharded-17205 created
+ cat /tmp/tmp.MvcB2EicIy
+ rm /tmp/tmp.bp9MHCb4Wj /tmp/tmp.MvcB2EicIy
+ return 0
++ kubectl_bin config current-context
+++ mktemp
++ local LAST_OUT=/tmp/tmp.ivpEKdDboJ
+++ mktemp
++ local LAST_ERR=/tmp/tmp.LpJX66QhSA
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in '$(seq 0 2)'
++ set +e
++ kubectl config current-context
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 -a -n 1 ']'
++ break
++ cat /tmp/tmp.ivpEKdDboJ
++ cat /tmp/tmp.LpJX66QhSA
++ rm /tmp/tmp.ivpEKdDboJ /tmp/tmp.LpJX66QhSA
++ return 0
+ kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-1563-2831d13a-2-cluster8 --namespace=demand-backup-physical-sharded-17205
++ mktemp
+ local LAST_OUT=/tmp/tmp.Cd3FGuqoip
++ mktemp
+ local LAST_ERR=/tmp/tmp.9Rfci8kCyC
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-1563-2831d13a-2-cluster8 --namespace=demand-backup-physical-sharded-17205
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.Cd3FGuqoip
Context "gke_cloud-dev-112233_us-central1-a_jen-psmdb-1563-2831d13a-2-cluster8" modified.
+ cat /tmp/tmp.9Rfci8kCyC
+ rm /tmp/tmp.Cd3FGuqoip /tmp/tmp.9Rfci8kCyC
+ return 0
+ deploy_minio
+ desc 'install Minio'
+ set +o xtrace
-----------------------------------------------------------------------------------
install Minio
-----------------------------------------------------------------------------------
+ helm uninstall minio-service
Error: uninstall: Release not loaded: minio-service: release: not found
+ :
+ helm repo remove minio
"minio" has been removed from your repositories
+ helm repo add minio https://charts.min.io/
"minio" has been added to your repositories
+ retry 10 60 helm install minio-service --version 5.0.14 --set replicas=1 --set mode=standalone --set resources.requests.memory=256Mi --set rootUser=rootuser --set rootPassword=rootpass123 --set 'users[0].accessKey=some-access-key' --set 'users[0].secretKey=some-secret-key' --set 'users[0].policy=consoleAdmin' --set service.type=ClusterIP --set configPathmc=/tmp/.minio/ --set persistence.size=2G --set securityContext.enabled=false minio/minio
+ local max=10
+ local delay=60
+ shift 2
+ local n=1
+ helm install minio-service --version 5.0.14 --set replicas=1 --set mode=standalone --set resources.requests.memory=256Mi --set rootUser=rootuser --set rootPassword=rootpass123 --set 'users[0].accessKey=some-access-key' --set 'users[0].secretKey=some-secret-key' --set 'users[0].policy=consoleAdmin' --set service.type=ClusterIP --set configPathmc=/tmp/.minio/ --set persistence.size=2G --set securityContext.enabled=false minio/minio
NAME: minio-service
LAST DEPLOYED: Tue Jun 4 12:24:44 2024
NAMESPACE: demand-backup-physical-sharded-17205
STATUS: deployed
REVISION: 1
TEST SUITE: None
NOTES:
MinIO can be accessed via port 9000 on the following DNS name from within your cluster: minio-service.demand-backup-physical-sharded-17205.svc.cluster.local
To access MinIO from localhost, run the below commands:
1. export POD_NAME=$(kubectl get pods --namespace demand-backup-physical-sharded-17205 -l "release=minio-service" -o jsonpath="{.items[0].metadata.name}")
2. kubectl port-forward $POD_NAME 9000 --namespace demand-backup-physical-sharded-17205
Read more about port forwarding here: http://kubernetes.io/docs/user-guide/kubectl/kubectl_port-forward/
You can now access MinIO server on http://localhost:9000. Follow the below steps to connect to MinIO server with mc client:
1. Download the MinIO mc client - https://min.io/docs/minio/linux/reference/minio-mc.html#quickstart
2. export MC_HOST_minio-service-local=http://$(kubectl get secret --namespace demand-backup-physical-sharded-17205 minio-service -o jsonpath="{.data.rootUser}" | base64 --decode):$(kubectl get secret --namespace demand-backup-physical-sharded-17205 minio-service -o jsonpath="{.data.rootPassword}" | base64 --decode)@localhost:9000
3. mc ls minio-service-local
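The `retry 10 60 helm install ...` line shows a generic retry helper: maximum attempts, delay, then the command itself. The traced locals (max, delay, n, and the shift 2) pin down the signature; the loop body below is an assumption about its shape:

    retry() {
        local max=$1
        local delay=$2
        shift 2   # the remaining arguments are the command to run
        local n=1
        until "$@"; do
            if [ "$n" -ge "$max" ]; then
                echo "retry: command failed after $n attempts" >&2
                return 1
            fi
            n=$((n + 1))
            sleep "$delay"
        done
    }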
++ kubectl_bin get pods --selector=release=minio-service -o 'jsonpath={.items[].metadata.name}'
+++ mktemp
++ local LAST_OUT=/tmp/tmp.ppRKK8Tsl6
+++ mktemp
++ local LAST_ERR=/tmp/tmp.P8pAWkRSZ7
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in '$(seq 0 2)'
++ set +e
++ kubectl get pods --selector=release=minio-service -o 'jsonpath={.items[].metadata.name}'
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 -a -n 1 ']'
++ break
++ cat /tmp/tmp.ppRKK8Tsl6
++ cat /tmp/tmp.P8pAWkRSZ7
++ rm /tmp/tmp.ppRKK8Tsl6 /tmp/tmp.P8pAWkRSZ7
++ return 0
+ MINIO_POD=minio-service-57dd49b-blr2n
+ wait_pod minio-service-57dd49b-blr2n
+ local pod=minio-service-57dd49b-blr2n
+ set +o xtrace
waiting for pod/minio-service-57dd49b-blr2n to be ready.OK
+ '[' -n psmdb-operator ']'
+ kubectl_bin create svc -n psmdb-operator externalname minio-service --external-name=minio-service.demand-backup-physical-sharded-17205.svc.cluster.local --tcp=9000
++ mktemp
+ local LAST_OUT=/tmp/tmp.7b4lClE2mr
++ mktemp
+ local LAST_ERR=/tmp/tmp.ZbI8jSAiJd
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl create svc -n psmdb-operator externalname minio-service --external-name=minio-service.demand-backup-physical-sharded-17205.svc.cluster.local --tcp=9000
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.7b4lClE2mr
service/minio-service created
+ cat /tmp/tmp.ZbI8jSAiJd
+ rm /tmp/tmp.7b4lClE2mr /tmp/tmp.ZbI8jSAiJd
+ return 0
+ kubectl_bin run -i --rm aws-cli --image=perconalab/awscli --restart=Never -- bash -c 'AWS_ACCESS_KEY_ID=some-access-key AWS_SECRET_ACCESS_KEY=some-secret-key AWS_DEFAULT_REGION=us-east-1 \
/usr/bin/aws --endpoint-url http://minio-service:9000 s3 mb s3://operator-testing'
++ mktemp
+ local LAST_OUT=/tmp/tmp.5Gvpu4z4vK
++ mktemp
+ local LAST_ERR=/tmp/tmp.cJB0LKxtJV
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl run -i --rm aws-cli --image=perconalab/awscli --restart=Never -- bash -c 'AWS_ACCESS_KEY_ID=some-access-key AWS_SECRET_ACCESS_KEY=some-secret-key AWS_DEFAULT_REGION=us-east-1 \
/usr/bin/aws --endpoint-url http://minio-service:9000 s3 mb s3://operator-testing'
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.5Gvpu4z4vK
make_bucket: operator-testing
pod "aws-cli" deleted
+ cat /tmp/tmp.cJB0LKxtJV
+ rm /tmp/tmp.5Gvpu4z4vK /tmp/tmp.cJB0LKxtJV
+ return 0
+ apply_s3_storage_secrets
+ desc 'create secrets for cloud storages'
+ set +o xtrace
-----------------------------------------------------------------------------------
create secrets for cloud storages
-----------------------------------------------------------------------------------
+ '[' -z '' ']'
+ kubectl_bin apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1563/e2e-tests/conf/minio-secret.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1563/e2e-tests/conf/cloud-secret.yml
++ mktemp
+ local LAST_OUT=/tmp/tmp.qOpqE3DCoJ
++ mktemp
+ local LAST_ERR=/tmp/tmp.KQGO0qTZiK
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1563/e2e-tests/conf/minio-secret.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1563/e2e-tests/conf/cloud-secret.yml
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.qOpqE3DCoJ
secret/minio-secret created
secret/aws-s3-secret created
secret/gcp-cs-secret created
secret/azure-secret created
+ cat /tmp/tmp.KQGO0qTZiK
+ rm /tmp/tmp.qOpqE3DCoJ /tmp/tmp.KQGO0qTZiK
+ return 0
+ desc 'Testing on sharded cluster'
+ set +o xtrace
-----------------------------------------------------------------------------------
Testing on sharded cluster
-----------------------------------------------------------------------------------
+ echo 'Creating PSMDB cluster'
Creating PSMDB cluster
+ cluster=some-name
+ kubectl_bin apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1563/e2e-tests/conf/secrets.yml
++ mktemp
+ local LAST_OUT=/tmp/tmp.bInlVQCNk0
++ mktemp
+ local LAST_ERR=/tmp/tmp.JTR6pmtNTS
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1563/e2e-tests/conf/secrets.yml
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.bInlVQCNk0
secret/some-users created
+ cat /tmp/tmp.JTR6pmtNTS
+ rm /tmp/tmp.bInlVQCNk0 /tmp/tmp.JTR6pmtNTS
+ return 0
+ apply_cluster /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1563/e2e-tests/demand-backup-physical-sharded/conf/some-name-sharded.yml
+ '[' -z '' ']'
+ cat_config /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1563/e2e-tests/demand-backup-physical-sharded/conf/some-name-sharded.yml
+ yq eval '.spec.upgradeOptions.apply="Never"'
+ yq eval '(.spec | select(has("backup"))).backup.image = "perconalab/percona-server-mongodb-operator:main-backup"'
+ yq eval '(.spec | select(.image == null)).image = "perconalab/percona-server-mongodb-operator:main-mongod7.0"'
+ cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1563/e2e-tests/demand-backup-physical-sharded/conf/some-name-sharded.yml
+ yq eval '(.spec | select(has("pmm"))).pmm.image = "perconalab/pmm-client:dev-latest"'
+ yq eval '(.spec | select(has("initImage"))).initImage = "perconalab/percona-server-mongodb-operator:PR-1563-2831d13a"'
+ kubectl_bin apply -f -
++ mktemp
+ local LAST_OUT=/tmp/tmp.0UZLkiDSdS
++ mktemp
+ local LAST_ERR=/tmp/tmp.jQerHU7eFO
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl apply -f -
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.0UZLkiDSdS
perconaservermongodb.psmdb.percona.com/some-name created
+ cat /tmp/tmp.jQerHU7eFO
+ rm /tmp/tmp.0UZLkiDSdS /tmp/tmp.jQerHU7eFO
+ return 0
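cat_config is a chain of yq edits over the CR manifest that pins every image to the build under test before the apply. The trace interleaves the pipeline stages out of order; re-assembled in one plausible sequence, with $test_dir standing in for the suite's demand-backup-physical-sharded directory:

    cat "$test_dir/conf/some-name-sharded.yml" \
        | yq eval '(.spec | select(.image == null)).image = "perconalab/percona-server-mongodb-operator:main-mongod7.0"' \
        | yq eval '(.spec | select(has("initImage"))).initImage = "perconalab/percona-server-mongodb-operator:PR-1563-2831d13a"' \
        | yq eval '(.spec | select(has("backup"))).backup.image = "perconalab/percona-server-mongodb-operator:main-backup"' \
        | yq eval '(.spec | select(has("pmm"))).pmm.image = "perconalab/pmm-client:dev-latest"' \
        | yq eval '.spec.upgradeOptions.apply="Never"' \
        | kubectl apply -f -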
+ kubectl_bin apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1563/e2e-tests/conf/client_with_tls.yml
++ mktemp
+ local LAST_OUT=/tmp/tmp.npMhA1vylQ
++ mktemp
+ local LAST_ERR=/tmp/tmp.Zr6RUvuSRq
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1563/e2e-tests/conf/client_with_tls.yml
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.npMhA1vylQ
deployment.apps/psmdb-client created
+ cat /tmp/tmp.Zr6RUvuSRq
+ rm /tmp/tmp.npMhA1vylQ /tmp/tmp.Zr6RUvuSRq
+ return 0
+ echo 'check if all pods started'
check if all pods started
+ wait_for_running some-name-rs0 3
+ local name=some-name-rs0
+ let last_pod=2
+ local check_cluster_readyness=true
+ set_debug
+ [[ 1 == 1 ]]
+ set -o xtrace
+ local rs_name=rs0
+ local cluster_name=some-name
++ seq 0 2
+ for i in '$(seq 0 $last_pod)'
+ [[ 0 -eq 2 ]]
+ wait_pod some-name-rs0-0
+ local pod=some-name-rs0-0
+ set +o xtrace
waiting for pod/some-name-rs0-0 to be ready........OK
+ for i in '$(seq 0 $last_pod)'
+ [[ 1 -eq 2 ]]
+ wait_pod some-name-rs0-1
+ local pod=some-name-rs0-1
+ set +o xtrace
waiting for pod/some-name-rs0-1 to be ready......OK
+ for i in '$(seq 0 $last_pod)'
+ [[ 2 -eq 2 ]]
++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}'
+++ mktemp
++ local LAST_OUT=/tmp/tmp.EPs3By830F
+++ mktemp
++ local LAST_ERR=/tmp/tmp.N40LqN1ddh
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in '$(seq 0 2)'
++ set +e
++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}'
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 -a -n 1 ']'
++ break
++ cat /tmp/tmp.EPs3By830F
++ cat /tmp/tmp.N40LqN1ddh
++ rm /tmp/tmp.EPs3By830F /tmp/tmp.N40LqN1ddh
++ return 0
+ [[ '' == \t\r\u\e ]]
+ wait_pod some-name-rs0-2
+ local pod=some-name-rs0-2
+ set +o xtrace
waiting for pod/some-name-rs0-2 to be ready......OK
++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}'
+++ mktemp
++ local LAST_OUT=/tmp/tmp.95nlhp1pMn
+++ mktemp
++ local LAST_ERR=/tmp/tmp.QgfmFQ2rv0
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in '$(seq 0 2)'
++ set +e
++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}'
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 -a -n 1 ']'
++ break
++ cat /tmp/tmp.95nlhp1pMn
++ cat /tmp/tmp.QgfmFQ2rv0
++ rm /tmp/tmp.95nlhp1pMn /tmp/tmp.QgfmFQ2rv0
++ return 0
+ [[ '' == \t\r\u\e ]]
+ sleep 10
+ [[ true == \t\r\u\e ]]
+ set +x
Waiting for cluster readyness...................................
+ wait_for_running some-name-cfg 3
+ local name=some-name-cfg
+ let last_pod=2
+ local check_cluster_readyness=true
+ set_debug
+ [[ 1 == 1 ]]
+ set -o xtrace
+ local rs_name=cfg
+ local cluster_name=some-name
++ seq 0 2
+ for i in '$(seq 0 $last_pod)'
+ [[ 0 -eq 2 ]]
+ wait_pod some-name-cfg-0
+ local pod=some-name-cfg-0
+ set +o xtrace
waiting for pod/some-name-cfg-0 to be ready.OK
+ for i in '$(seq 0 $last_pod)'
+ [[ 1 -eq 2 ]]
+ wait_pod some-name-cfg-1
+ local pod=some-name-cfg-1
+ set +o xtrace
waiting for pod/some-name-cfg-1 to be ready.OK
+ for i in '$(seq 0 $last_pod)'
+ [[ 2 -eq 2 ]]
++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].arbiter.enabled}'
+++ mktemp
++ local LAST_OUT=/tmp/tmp.YY0dPqNEnY
+++ mktemp
++ local LAST_ERR=/tmp/tmp.loYUq8I3HF
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in '$(seq 0 2)'
++ set +e
++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].arbiter.enabled}'
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 -a -n 1 ']'
++ break
++ cat /tmp/tmp.YY0dPqNEnY
++ cat /tmp/tmp.loYUq8I3HF
++ rm /tmp/tmp.YY0dPqNEnY /tmp/tmp.loYUq8I3HF
++ return 0
+ [[ '' == \t\r\u\e ]]
+ wait_pod some-name-cfg-2
+ local pod=some-name-cfg-2
+ set +o xtrace
waiting for pod/some-name-cfg-2 to be ready.OK
++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].non_voting.enabled}'
+++ mktemp
++ local LAST_OUT=/tmp/tmp.GNSSZme7fH
+++ mktemp
++ local LAST_ERR=/tmp/tmp.KRpMlCI4oC
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in '$(seq 0 2)'
++ set +e
++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].non_voting.enabled}'
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 -a -n 1 ']'
++ break
++ cat /tmp/tmp.GNSSZme7fH
++ cat /tmp/tmp.KRpMlCI4oC
++ rm /tmp/tmp.GNSSZme7fH /tmp/tmp.KRpMlCI4oC
++ return 0
+ [[ '' == \t\r\u\e ]]
+ sleep 10
+ [[ true == \t\r\u\e ]]
+ set +x
Waiting for cluster readyness
+ wait_for_running some-name-mongos 3
+ local name=some-name-mongos
+ let last_pod=2
+ local check_cluster_readyness=true
+ set_debug
+ [[ 1 == 1 ]]
+ set -o xtrace
+ local rs_name=mongos
+ local cluster_name=some-name
++ seq 0 2
+ for i in '$(seq 0 $last_pod)'
+ [[ 0 -eq 2 ]]
+ wait_pod some-name-mongos-0
+ local pod=some-name-mongos-0
+ set +o xtrace
waiting for pod/some-name-mongos-0 to be ready.OK
+ for i in '$(seq 0 $last_pod)'
+ [[ 1 -eq 2 ]]
+ wait_pod some-name-mongos-1
+ local pod=some-name-mongos-1
+ set +o xtrace
waiting for pod/some-name-mongos-1 to be ready.OK
+ for i in '$(seq 0 $last_pod)'
+ [[ 2 -eq 2 ]]
++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].arbiter.enabled}'
+++ mktemp
++ local LAST_OUT=/tmp/tmp.r6fjYh5cgi
+++ mktemp
++ local LAST_ERR=/tmp/tmp.uq99IAxdIT
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in '$(seq 0 2)'
++ set +e
++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].arbiter.enabled}'
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 -a -n 1 ']'
++ break
++ cat /tmp/tmp.r6fjYh5cgi
++ cat /tmp/tmp.uq99IAxdIT
++ rm /tmp/tmp.r6fjYh5cgi /tmp/tmp.uq99IAxdIT
++ return 0
+ [[ '' == \t\r\u\e ]]
+ wait_pod some-name-mongos-2
+ local pod=some-name-mongos-2
+ set +o xtrace
waiting for pod/some-name-mongos-2 to be ready.OK
++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].non_voting.enabled}'
+++ mktemp
++ local LAST_OUT=/tmp/tmp.aIUck9kuJY
+++ mktemp
++ local LAST_ERR=/tmp/tmp.dP3HodcqvM
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in '$(seq 0 2)'
++ set +e
++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].non_voting.enabled}'
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 -a -n 1 ']'
++ break
++ cat /tmp/tmp.aIUck9kuJY
++ cat /tmp/tmp.dP3HodcqvM
++ rm /tmp/tmp.aIUck9kuJY /tmp/tmp.dP3HodcqvM
++ return 0
+ [[ '' == \t\r\u\e ]]
+ sleep 10
+ [[ true == \t\r\u\e ]]
+ set +x
Waiting for cluster readyness
+ wait_cluster_consistency some-name
+ local cluster_name=some-name
+ local wait_time=32
+ retry=0
+ sleep 7
+ echo -n 'waiting for cluster readyness'
waiting for cluster readyness++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}'
+++ mktemp
++ local LAST_OUT=/tmp/tmp.jEcFl1IWHs
+++ mktemp
++ local LAST_ERR=/tmp/tmp.89Sy2XW7Hr
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in '$(seq 0 2)'
++ set +e
++ kubectl get psmdb some-name -o 'jsonpath={.status.state}'
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 -a -n 1 ']'
++ break
++ cat /tmp/tmp.jEcFl1IWHs
++ cat /tmp/tmp.89Sy2XW7Hr
++ rm /tmp/tmp.jEcFl1IWHs /tmp/tmp.89Sy2XW7Hr
++ return 0
+ [[ ready == \r\e\a\d\y ]]
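wait_cluster_consistency reduces to polling .status.state on the psmdb object. A sketch consistent with the traced locals and the sleep 7 cadence (the retry-cap handling is assumed; the echoed string, spelling included, is kept verbatim from the suite):

    wait_cluster_consistency() {
        local cluster_name=$1 wait_time=${2:-32} retry=0
        sleep 7
        echo -n 'waiting for cluster readyness'
        until [ "$(kubectl get psmdb "$cluster_name" -o 'jsonpath={.status.state}')" = "ready" ]; do
            retry=$((retry + 1))
            [ "$retry" -gt "$wait_time" ] && return 1
            echo -n .
            sleep 7
        done
    }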
++ jq -r 'select(.loadBalancer != null and .loadBalancer.ingress != null and .loadBalancer.ingress != []) | .loadBalancer.ingress[0][]'
++ kubectl_bin get svc some-name-mongos '-o=jsonpath={.status}'
+++ mktemp
++ local LAST_OUT=/tmp/tmp.4dIq3WC9VD
+++ mktemp
++ local LAST_ERR=/tmp/tmp.QFbfOK6N9D
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in '$(seq 0 2)'
++ set +e
++ kubectl get svc some-name-mongos '-o=jsonpath={.status}'
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 -a -n 1 ']'
++ break
++ cat /tmp/tmp.4dIq3WC9VD
++ cat /tmp/tmp.QFbfOK6N9D
++ rm /tmp/tmp.4dIq3WC9VD /tmp/tmp.QFbfOK6N9D
++ return 0
+ lbEndpoint=34.133.190.96
+ '[' -z 34.133.190.96 ']'
+ run_mongos 'db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})' userAdmin:userAdmin123456@some-name-mongos.demand-backup-physical-sharded-17205
+ local 'command=db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})'
+ local uri=userAdmin:userAdmin123456@some-name-mongos.demand-backup-physical-sharded-17205
+ local driver=mongodb
+ local suffix=.svc.cluster.local
++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}'
+++ mktemp
++ local LAST_OUT=/tmp/tmp.yMSx7YueU0
+++ mktemp
++ local LAST_ERR=/tmp/tmp.In9nOEjPlI
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in '$(seq 0 2)'
++ set +e
++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}'
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 -a -n 1 ']'
++ break
++ cat /tmp/tmp.yMSx7YueU0
++ cat /tmp/tmp.In9nOEjPlI
++ rm /tmp/tmp.yMSx7YueU0 /tmp/tmp.In9nOEjPlI
++ return 0
+ local client_container=psmdb-client-5f578b7f94-66hkw
+ local mongo_flag=
+ kubectl_bin exec psmdb-client-5f578b7f94-66hkw -- bash -c 'printf '\''db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})\n'\'' | mongo mongodb://userAdmin:userAdmin123456@some-name-mongos.demand-backup-physical-sharded-17205.svc.cluster.local/admin '
++ mktemp
+ local LAST_OUT=/tmp/tmp.ZATkdwb9C8
++ mktemp
+ local LAST_ERR=/tmp/tmp.U0Ods8pTG1
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl exec psmdb-client-5f578b7f94-66hkw -- bash -c 'printf '\''db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})\n'\'' | mongo mongodb://userAdmin:userAdmin123456@some-name-mongos.demand-backup-physical-sharded-17205.svc.cluster.local/admin '
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.ZATkdwb9C8
Percona Server for MongoDB shell version v4.4.29-28
connecting to: mongodb://some-name-mongos.demand-backup-physical-sharded-17205.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb
Implicit session: session { "id" : UUID("faa6698c-0575-4bab-8eb3-84099332f9e0") }
Percona Server for MongoDB server version: v7.0.11-6
WARNING: shell and server versions do not match
Successfully added user: { "user" : "myApp", "roles" : [ { "db" : "myApp", "role" : "readWrite" } ] }
bye
+ cat /tmp/tmp.U0Ods8pTG1
+ rm /tmp/tmp.ZATkdwb9C8 /tmp/tmp.U0Ods8pTG1
+ return 0
+ sleep 1
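run_mongos, reconstructed from its traced locals: look up the psmdb-client pod, then pipe the statement into the mongo shell inside it. Parameter defaults are inferred from the traced values, and the quoting is simplified relative to the printf '...' form the trace shows:

    run_mongos() {
        local command="$1"
        local uri="$2"
        local driver=${3:-mongodb}
        local suffix=${4:-.svc.cluster.local}
        local mongo_flag="$5"
        local client_container
        client_container=$(kubectl get pods --selector=name=psmdb-client \
            -o 'jsonpath={.items[].metadata.name}')
        # Feed the statement to the in-cluster shell; the URI always targets /admin.
        kubectl exec "$client_container" -- bash -c \
            "printf '$command\n' | mongo $driver://$uri$suffix/admin $mongo_flag"
    }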
+ run_mongos 'use myApp\n db.test.insert({ x: 100501 })' myApp:myPass@some-name-mongos.demand-backup-physical-sharded-17205
+ local 'command=use myApp\n db.test.insert({ x: 100501 })'
+ local uri=myApp:myPass@some-name-mongos.demand-backup-physical-sharded-17205
+ local driver=mongodb
+ local suffix=.svc.cluster.local
++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}'
+++ mktemp
++ local LAST_OUT=/tmp/tmp.78I99AMk8e
+++ mktemp
++ local LAST_ERR=/tmp/tmp.s2j840hW4e
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in '$(seq 0 2)'
++ set +e
++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}'
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 -a -n 1 ']'
++ break
++ cat /tmp/tmp.78I99AMk8e
++ cat /tmp/tmp.s2j840hW4e
++ rm /tmp/tmp.78I99AMk8e /tmp/tmp.s2j840hW4e
++ return 0
+ local client_container=psmdb-client-5f578b7f94-66hkw
+ local mongo_flag=
+ kubectl_bin exec psmdb-client-5f578b7f94-66hkw -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100501 })\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.demand-backup-physical-sharded-17205.svc.cluster.local/admin '
++ mktemp
+ local LAST_OUT=/tmp/tmp.8wfHrkCsf8
++ mktemp
+ local LAST_ERR=/tmp/tmp.xmaCTMXjuW
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl exec psmdb-client-5f578b7f94-66hkw -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100501 })\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.demand-backup-physical-sharded-17205.svc.cluster.local/admin '
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.8wfHrkCsf8
Percona Server for MongoDB shell version v4.4.29-28
connecting to: mongodb://some-name-mongos.demand-backup-physical-sharded-17205.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb
Implicit session: session { "id" : UUID("f792949a-506b-4140-9d94-2cd6ddb33990") }
Percona Server for MongoDB server version: v7.0.11-6
WARNING: shell and server versions do not match
switched to db myApp
WriteResult({ "nInserted" : 1 })
bye
+ cat /tmp/tmp.xmaCTMXjuW
+ rm /tmp/tmp.8wfHrkCsf8 /tmp/tmp.xmaCTMXjuW
+ return 0
+ sleep 5
+ compare_mongos_cmd find myApp:myPass@some-name-mongos.demand-backup-physical-sharded-17205 -sharded
+ local command=find
+ local uri=myApp:myPass@some-name-mongos.demand-backup-physical-sharded-17205
+ local postfix=-sharded
+ local suffix=
+ local database=myApp
+ local collection=test
+ egrep -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:'
+ run_mongos 'use myApp\n db.test.find()' myApp:myPass@some-name-mongos.demand-backup-physical-sharded-17205 mongodb ''
+ local 'command=use myApp\n db.test.find()'
+ local uri=myApp:myPass@some-name-mongos.demand-backup-physical-sharded-17205
+ local driver=mongodb
+ local suffix=.svc.cluster.local
++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}'
+++ mktemp
+ /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/'
++ local LAST_OUT=/tmp/tmp.dhbAFRShkv
+++ mktemp
++ local LAST_ERR=/tmp/tmp.ItoXTgKcHn
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in '$(seq 0 2)'
++ set +e
++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}'
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 -a -n 1 ']'
++ break
++ cat /tmp/tmp.dhbAFRShkv
++ cat /tmp/tmp.ItoXTgKcHn
++ rm /tmp/tmp.dhbAFRShkv /tmp/tmp.ItoXTgKcHn
++ return 0
+ local client_container=psmdb-client-5f578b7f94-66hkw
+ local mongo_flag=
+ kubectl_bin exec psmdb-client-5f578b7f94-66hkw -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.demand-backup-physical-sharded-17205.svc.cluster.local/admin '
++ mktemp
+ local LAST_OUT=/tmp/tmp.WwvRxlwV1t
++ mktemp
+ local LAST_ERR=/tmp/tmp.R4yyhxVFHB
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl exec psmdb-client-5f578b7f94-66hkw -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.demand-backup-physical-sharded-17205.svc.cluster.local/admin '
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.WwvRxlwV1t
+ cat /tmp/tmp.R4yyhxVFHB
+ rm /tmp/tmp.WwvRxlwV1t /tmp/tmp.R4yyhxVFHB
+ return 0
+ diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1563/e2e-tests/demand-backup-physical-sharded/compare/find-sharded.json /tmp/tmp.XHwOP4Nfog/find-sharded
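compare_mongos_cmd is the suite's golden-file check: run the query, strip connection noise and volatile identifiers, and diff against the stored expectation. The egrep and sed filters are verbatim from the trace; $tmp_dir and $test_dir are shorthand for the temp and compare paths shown above:

    run_mongos 'use myApp\n db.test.find()' "myApp:myPass@some-name-mongos.$namespace" \
        | egrep -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match' \
        | /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' \
        > "$tmp_dir/find-sharded"
    diff "$test_dir/compare/find-sharded.json" "$tmp_dir/find-sharded"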
+ echo 'waiting 60 seconds for stable timestamp in wiredtiger'
waiting 60 seconds for stable timestamp in wiredtiger
+ sleep 60
+ echo 'running backups'
running backups
+ backup_name_minio=backup-minio-sharded
+ run_backup minio backup-minio-sharded
+ local storage=minio
+ local backup_name=backup-minio-sharded
+ cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1563/e2e-tests/demand-backup-physical-sharded/conf/backup.yml
+ /usr/bin/sed -e 's/name:/name: backup-minio-sharded/'
+ kubectl_bin apply -f -
+ /usr/bin/sed -e 's/storageName:/storageName: minio/'
++ mktemp
+ local LAST_OUT=/tmp/tmp.QsXp7chDJB
++ mktemp
+ local LAST_ERR=/tmp/tmp.TSzVpZwcps
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl apply -f -
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.QsXp7chDJB
perconaservermongodbbackup.psmdb.percona.com/backup-minio-sharded created
+ cat /tmp/tmp.TSzVpZwcps
+ rm /tmp/tmp.QsXp7chDJB /tmp/tmp.TSzVpZwcps
+ return 0
+ '[' -z '' ']'
+ backup_name_aws=backup-aws-s3-sharded
+ backup_name_gcp=backup-gcp-cs-sharded
+ backup_name_azure=backup-azure-blob-sharded
+ run_backup aws-s3 backup-aws-s3-sharded
+ local storage=aws-s3
+ local backup_name=backup-aws-s3-sharded
+ cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1563/e2e-tests/demand-backup-physical-sharded/conf/backup.yml
+ /usr/bin/sed -e 's/name:/name: backup-aws-s3-sharded/'
+ /usr/bin/sed -e 's/storageName:/storageName: aws-s3/'
+ kubectl_bin apply -f -
++ mktemp
+ local LAST_OUT=/tmp/tmp.hTK4sLqwOX
++ mktemp
+ local LAST_ERR=/tmp/tmp.t0nC50ZvIr
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl apply -f -
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.hTK4sLqwOX
perconaservermongodbbackup.psmdb.percona.com/backup-aws-s3-sharded created
+ cat /tmp/tmp.t0nC50ZvIr
+ rm /tmp/tmp.hTK4sLqwOX /tmp/tmp.t0nC50ZvIr
+ return 0
+ run_backup gcp-cs backup-gcp-cs-sharded
+ local storage=gcp-cs
+ local backup_name=backup-gcp-cs-sharded
+ /usr/bin/sed -e 's/name:/name: backup-gcp-cs-sharded/'
+ /usr/bin/sed -e 's/storageName:/storageName: gcp-cs/'
+ cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1563/e2e-tests/demand-backup-physical-sharded/conf/backup.yml
+ kubectl_bin apply -f -
++ mktemp
+ local LAST_OUT=/tmp/tmp.g8KNnZewLX
++ mktemp
+ local LAST_ERR=/tmp/tmp.J3tvq8yiIh
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl apply -f -
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.g8KNnZewLX
perconaservermongodbbackup.psmdb.percona.com/backup-gcp-cs-sharded created
+ cat /tmp/tmp.J3tvq8yiIh
+ rm /tmp/tmp.g8KNnZewLX /tmp/tmp.J3tvq8yiIh
+ return 0
+ run_backup azure-blob backup-azure-blob-sharded
+ local storage=azure-blob
+ local backup_name=backup-azure-blob-sharded
+ /usr/bin/sed -e 's/name:/name: backup-azure-blob-sharded/'
+ /usr/bin/sed -e 's/storageName:/storageName: azure-blob/'
+ kubectl_bin apply -f -
+ cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1563/e2e-tests/demand-backup-physical-sharded/conf/backup.yml
++ mktemp
+ local LAST_OUT=/tmp/tmp.ILAush9SV3
++ mktemp
+ local LAST_ERR=/tmp/tmp.aNJVPJuIMr
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl apply -f -
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.ILAush9SV3
perconaservermongodbbackup.psmdb.percona.com/backup-azure-blob-sharded created
+ cat /tmp/tmp.aNJVPJuIMr
+ rm /tmp/tmp.ILAush9SV3 /tmp/tmp.aNJVPJuIMr
+ return 0
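run_backup is plain stream templating: the shared backup.yml ships with empty name:/storageName: fields that sed fills in per storage before the apply. Reconstructed from the trace, with $test_dir standing in for the suite's conf directory:

    run_backup() {
        local storage=$1
        local backup_name=$2
        cat "$test_dir/conf/backup.yml" \
            | /usr/bin/sed -e "s/name:/name: $backup_name/" \
            | /usr/bin/sed -e "s/storageName:/storageName: $storage/" \
            | kubectl apply -f -
    }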
+ wait_backup backup-aws-s3-sharded
+ local backup_name=backup-aws-s3-sharded
+ set +o xtrace
backup-aws-s3-sharded...........................................
+ wait_backup backup-gcp-cs-sharded
+ local backup_name=backup-gcp-cs-sharded
+ set +o xtrace
backup-gcp-cs-sharded.....................
+ wait_backup backup-azure-blob-sharded
+ local backup_name=backup-azure-blob-sharded
+ set +o xtrace
backup-azure-blob-sharded.......................
+ wait_backup backup-minio-sharded
+ local backup_name=backup-minio-sharded
+ set +o xtrace
backup-minio-sharded.
+ '[' -z '' ']'
+ echo 'drop collection'
drop collection
+ run_mongos 'use myApp\n db.test.drop()' myApp:myPass@some-name-mongos.demand-backup-physical-sharded-17205
+ local 'command=use myApp\n db.test.drop()'
+ local uri=myApp:myPass@some-name-mongos.demand-backup-physical-sharded-17205
+ local driver=mongodb
+ local suffix=.svc.cluster.local
++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}'
+++ mktemp
++ local LAST_OUT=/tmp/tmp.G9uo4oX77T
+++ mktemp
++ local LAST_ERR=/tmp/tmp.KhHx7SL5dO
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in '$(seq 0 2)'
++ set +e
++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}'
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 -a -n 1 ']'
++ break
++ cat /tmp/tmp.G9uo4oX77T
++ cat /tmp/tmp.KhHx7SL5dO
++ rm /tmp/tmp.G9uo4oX77T /tmp/tmp.KhHx7SL5dO
++ return 0
+ local client_container=psmdb-client-5f578b7f94-66hkw
+ local mongo_flag=
+ kubectl_bin exec psmdb-client-5f578b7f94-66hkw -- bash -c 'printf '\''use myApp\n db.test.drop()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.demand-backup-physical-sharded-17205.svc.cluster.local/admin '
++ mktemp
+ local LAST_OUT=/tmp/tmp.EJAs2ix7ta
++ mktemp
+ local LAST_ERR=/tmp/tmp.3q6wJaZdDO
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl exec psmdb-client-5f578b7f94-66hkw -- bash -c 'printf '\''use myApp\n db.test.drop()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.demand-backup-physical-sharded-17205.svc.cluster.local/admin '
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.EJAs2ix7ta
Percona Server for MongoDB shell version v4.4.29-28
connecting to: mongodb://some-name-mongos.demand-backup-physical-sharded-17205.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb
Implicit session: session { "id" : UUID("820c957e-8b6e-411a-b5ca-3cd2ac481bd7") }
Percona Server for MongoDB server version: v7.0.11-6
WARNING: shell and server versions do not match
switched to db myApp
true
bye
+ cat /tmp/tmp.3q6wJaZdDO
+ rm /tmp/tmp.EJAs2ix7ta /tmp/tmp.3q6wJaZdDO
+ return 0
+ echo 'check backup and restore -- aws-s3'
check backup and restore -- aws-s3
+ run_restore backup-aws-s3-sharded _restore_sharded
+ local backup_name=backup-aws-s3-sharded
+ kubectl_bin apply -f -
+ /usr/bin/sed -e 's/name:/name: restore-backup-aws-s3-sharded/'
+ cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1563/e2e-tests/demand-backup-physical-sharded/conf/restore.yml
+ /usr/bin/sed -e 's/backupName:/backupName: backup-aws-s3-sharded/'
++ mktemp
+ local LAST_OUT=/tmp/tmp.y1l8uYUEYo
++ mktemp
+ local LAST_ERR=/tmp/tmp.Aoal4eWmjE
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl apply -f -
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.y1l8uYUEYo
perconaservermongodbrestore.psmdb.percona.com/restore-backup-aws-s3-sharded created
+ cat /tmp/tmp.Aoal4eWmjE
+ rm /tmp/tmp.y1l8uYUEYo /tmp/tmp.Aoal4eWmjE
+ return 0
+ run_recovery_check backup-aws-s3-sharded _restore_sharded
+ local backup_name=backup-aws-s3-sharded
+ local compare_suffix=_restore_sharded
+ wait_restore backup-aws-s3-sharded some-name requested 0 1200
+ local backup_name=backup-aws-s3-sharded
+ local cluster_name=some-name
+ local target_state=requested
+ local wait_cluster_consistency=0
+ local wait_time=1200
+ set +o xtrace
waiting psmdb-restore/backup-aws-s3-sharded to reach requested state.............................................................................................................................................
+ '[' 0 -eq 1 ']'
+ echo
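wait_restore polls the restore object toward a target state with a per-call cap (a "requested" wait of up to 1200 here, a "ready" wait of up to 1800 further down). A sketch only: the exact resource name queried and the poll interval are assumptions, inferred from the restore object created above being named restore-backup-aws-s3-sharded:

    wait_restore() {
        local backup_name=$1 cluster_name=$2 target_state=$3
        local wait_cluster_consistency=$4 wait_time=$5
        echo -n "waiting psmdb-restore/$backup_name to reach $target_state state"
        for _ in $(seq 1 "$wait_time"); do
            state=$(kubectl get perconaservermongodbrestore "restore-$backup_name" \
                -o 'jsonpath={.status.state}' 2>/dev/null)
            [ "$state" = "$target_state" ] && return 0
            echo -n .
            sleep 1
        done
        return 1
    }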
== "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.8ZQrUq65uW ++ mktemp + local LAST_ERR=/tmp/tmp.sOPNiIDN98 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get -o yaml statefulset/some-name-rs0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.8ZQrUq65uW + cat /tmp/tmp.sOPNiIDN98 + rm /tmp/tmp.8ZQrUq65uW /tmp/tmp.sOPNiIDN98 + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.XHwOP4Nfog/statefulset_some-name-rs0.yml + version_gt 1.22 ++ bc -l ++ echo '1.26 >= 1.22' + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.XHwOP4Nfog/statefulset_some-name-rs0.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.XHwOP4Nfog/statefulset_some-name-rs0.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1563/e2e-tests/demand-backup-physical-sharded/compare/statefulset_some-name-rs0_restore_sharded.yml == */cronjob* ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1563/e2e-tests/demand-backup-physical-sharded/compare/statefulset_some-name-rs0_restore_sharded.yml /tmp/tmp.XHwOP4Nfog/statefulset_some-name-rs0.yml + wait_restore backup-aws-s3-sharded some-name ready 0 1800 + local backup_name=backup-aws-s3-sharded + local cluster_name=some-name + local target_state=ready + local wait_cluster_consistency=0 + local wait_time=1800 + set +o xtrace waiting psmdb-restore/backup-aws-s3-sharded to reach ready state............................................................................... + '[' 0 -eq 1 ']' + kubectl_bin get psmdb some-name -o yaml ++ mktemp + local LAST_OUT=/tmp/tmp.ohKGPdIQ1c ++ mktemp + local LAST_ERR=/tmp/tmp.34DxdatK1X + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get psmdb some-name -o yaml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.ohKGPdIQ1c apiVersion: psmdb.percona.com/v1 kind: PerconaServerMongoDB metadata: annotations: kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"psmdb.percona.com/v1","kind":"PerconaServerMongoDB","metadata":{"annotations":{},"name":"some-name","namespace":"demand-backup-physical-sharded-17205"},"spec":{"backup":{"enabled":true,"image":"perconalab/percona-server-mongodb-operator:main-backup","storages":{"aws-s3":{"s3":{"bucket":"operator-testing","credentialsSecret":"aws-s3-secret","insecureSkipTLSVerify":false,"prefix":"psmdb-demand-backup-physical-sharded","region":"us-east-1"},"type":"s3"},"azure-blob":{"azure":{"container":"operator-testing","credentialsSecret":"azure-secret","prefix":"psmdb-demand-backup-physical-sharded"},"type":"azure"},"gcp-cs":{"s3":{"bucket":"operator-testing","credentialsSecret":"gcp-cs-secret","endpointUrl":"https://storage.googleapis.com","insecureSkipTLSVerify":false,"prefix":"psmdb-demand-backup-physical-sharded","region":"us-east-1"},"type":"s3"},"minio":{"s3":{"bucket":"operator-testing","credentialsSecret":"minio-secret","endpointUrl":"http://minio-service:9000/","insecureSkipTLSVerify":false,"region":"us-east-1"},"type":"s3"}},"tasks":[{"compressionType":"gzip","enabled":true,"name":"weekly","schedule":"0 0 * * 0","storageName":"aws-s3"}]},"image":"perconalab/percona-server-mongodb-operator:main-mongod7.0","imagePullPolicy":"Always","replsets":[{"affinity":{"antiAffinityTopologyKey":"none"},"configuration":"operationProfiling:\n mode: slowOp\n slowOpThresholdMs: 100\nsecurity:\n enableEncryption: true\n redactClientLogData: 
false\nsetParameter:\n ttlMonitorSleepSecs: 60\n wiredTigerConcurrentReadTransactions: 128\n wiredTigerConcurrentWriteTransactions: 128\nstorage:\n engine: wiredTiger\n wiredTiger:\n collectionConfig:\n blockCompressor: snappy\n engineConfig:\n directoryForIndexes: false\n journalCompressor: snappy\n indexConfig:\n prefixCompression: true\n","expose":{"enabled":false,"exposeType":"ClusterIP"},"name":"rs0","resources":{"limits":{"cpu":"500m","memory":"1G"},"requests":{"cpu":"100m","memory":"0.1G"}},"size":3,"volumeSpec":{"persistentVolumeClaim":{"resources":{"requests":{"storage":"3Gi"}}}}}],"secrets":{"users":"some-users"},"sharding":{"configsvrReplSet":{"affinity":{"antiAffinityTopologyKey":"none"},"configuration":"operationProfiling:\n mode: slowOp\n slowOpThresholdMs: 100\nsecurity:\n enableEncryption: true\n redactClientLogData: false\nsetParameter:\n ttlMonitorSleepSecs: 60\n wiredTigerConcurrentReadTransactions: 128\n wiredTigerConcurrentWriteTransactions: 128\nstorage:\n engine: wiredTiger\n wiredTiger:\n collectionConfig:\n blockCompressor: snappy\n engineConfig:\n directoryForIndexes: false\n journalCompressor: snappy\n indexConfig:\n prefixCompression: true\n","expose":{"enabled":false,"exposeType":"ClusterIP"},"resources":{"limits":{"cpu":"500m","memory":"1G"},"requests":{"cpu":"100m","memory":"0.1G"}},"size":3,"volumeSpec":{"persistentVolumeClaim":{"resources":{"requests":{"storage":"3Gi"}}}}},"enabled":true,"mongos":{"affinity":{"antiAffinityTopologyKey":"none"},"expose":{"exposeType":"LoadBalancer"},"resources":{"limits":{"cpu":"500m","memory":"1G"},"requests":{"cpu":"100m","memory":"0.1G"}},"size":3}},"upgradeOptions":{"apply":"Never"}}} percona.com/resync-pbm: "true" creationTimestamp: "2024-06-04T12:25:27Z" generation: 2 name: some-name namespace: demand-backup-physical-sharded-17205 resourceVersion: "32666" uid: 2d7deafd-8d47-4747-9251-a15e50369740 spec: backup: enabled: true image: perconalab/percona-server-mongodb-operator:main-backup storages: aws-s3: s3: bucket: operator-testing credentialsSecret: aws-s3-secret insecureSkipTLSVerify: false prefix: psmdb-demand-backup-physical-sharded region: us-east-1 type: s3 azure-blob: azure: container: operator-testing credentialsSecret: azure-secret prefix: psmdb-demand-backup-physical-sharded type: azure gcp-cs: s3: bucket: operator-testing credentialsSecret: gcp-cs-secret endpointUrl: https://storage.googleapis.com insecureSkipTLSVerify: false prefix: psmdb-demand-backup-physical-sharded region: us-east-1 type: s3 minio: s3: bucket: operator-testing credentialsSecret: minio-secret endpointUrl: http://minio-service:9000/ insecureSkipTLSVerify: false region: us-east-1 type: s3 tasks: - compressionType: gzip enabled: true name: weekly schedule: 0 0 * * 0 storageName: aws-s3 crVersion: 1.17.0 image: perconalab/percona-server-mongodb-operator:main-mongod7.0 imagePullPolicy: Always replsets: - affinity: antiAffinityTopologyKey: none configuration: | operationProfiling: mode: slowOp slowOpThresholdMs: 100 security: enableEncryption: true redactClientLogData: false setParameter: ttlMonitorSleepSecs: 60 wiredTigerConcurrentReadTransactions: 128 wiredTigerConcurrentWriteTransactions: 128 storage: engine: wiredTiger wiredTiger: collectionConfig: blockCompressor: snappy engineConfig: directoryForIndexes: false journalCompressor: snappy indexConfig: prefixCompression: true expose: enabled: false exposeType: ClusterIP name: rs0 resources: limits: cpu: 500m memory: 1G requests: cpu: 100m memory: 0.1G size: 3 volumeSpec: persistentVolumeClaim: 
resources: requests: storage: 3Gi secrets: users: some-users sharding: configsvrReplSet: affinity: antiAffinityTopologyKey: none configuration: | operationProfiling: mode: slowOp slowOpThresholdMs: 100 security: enableEncryption: true redactClientLogData: false setParameter: ttlMonitorSleepSecs: 60 wiredTigerConcurrentReadTransactions: 128 wiredTigerConcurrentWriteTransactions: 128 storage: engine: wiredTiger wiredTiger: collectionConfig: blockCompressor: snappy engineConfig: directoryForIndexes: false journalCompressor: snappy indexConfig: prefixCompression: true expose: enabled: false exposeType: ClusterIP resources: limits: cpu: 500m memory: 1G requests: cpu: 100m memory: 0.1G size: 3 volumeSpec: persistentVolumeClaim: resources: requests: storage: 3Gi enabled: true mongos: affinity: antiAffinityTopologyKey: none expose: exposeType: LoadBalancer resources: limits: cpu: 500m memory: 1G requests: cpu: 100m memory: 0.1G size: 3 upgradeOptions: apply: Never status: conditions: - lastTransitionTime: "2024-06-04T12:25:30Z" status: "True" type: initializing - lastTransitionTime: "2024-06-04T12:27:06Z" message: 'rs0: ready' reason: RSReady status: "True" type: ready - lastTransitionTime: "2024-06-04T12:27:06Z" status: "True" type: initializing - lastTransitionTime: "2024-06-04T12:27:56Z" reason: MongosReady status: "True" type: ready - lastTransitionTime: "2024-06-04T12:27:56Z" status: "True" type: initializing - lastTransitionTime: "2024-06-04T12:28:06Z" status: "True" type: ready - lastTransitionTime: "2024-06-04T12:33:09Z" status: "True" type: initializing - lastTransitionTime: "2024-06-04T12:33:55Z" message: 'rs0: ready' reason: RSReady status: "True" type: ready - lastTransitionTime: "2024-06-04T12:33:55Z" status: "True" type: initializing - lastTransitionTime: "2024-06-04T12:34:02Z" message: 'cfg: ready' reason: RSReady status: "True" type: ready - lastTransitionTime: "2024-06-04T12:34:02Z" status: "True" type: initializing - lastTransitionTime: "2024-06-04T12:34:35Z" message: 'rs0: ready' reason: RSReady status: "True" type: ready - lastTransitionTime: "2024-06-04T12:34:35Z" status: "True" type: initializing - lastTransitionTime: "2024-06-04T12:35:02Z" message: 'cfg: ready' reason: RSReady status: "True" type: ready - lastTransitionTime: "2024-06-04T12:35:02Z" status: "True" type: initializing - lastTransitionTime: "2024-06-04T12:35:04Z" message: 'rs0: ready' reason: RSReady status: "True" type: ready - lastTransitionTime: "2024-06-04T12:35:04Z" status: "True" type: initializing host: 34.133.190.96 mongoImage: perconalab/percona-server-mongodb-operator:main-mongod7.0 mongoVersion: 7.0.11-6 mongos: ready: 0 size: 0 status: initializing observedGeneration: 2 ready: 6 replsets: cfg: initialized: true ready: 3 size: 3 status: ready rs0: added_as_shard: true initialized: true ready: 3 size: 3 status: ready size: 6 state: initializing + cat /tmp/tmp.34DxdatK1X + rm /tmp/tmp.ohKGPdIQ1c /tmp/tmp.34DxdatK1X + return 0 ++ kubectl_bin get psmdb some-name -o yaml ++ yq '.metadata.annotations."percona.com/resync-pbm"' +++ mktemp ++ local LAST_OUT=/tmp/tmp.M6LnRke0EM +++ mktemp ++ local LAST_ERR=/tmp/tmp.lTqHnXM1B8 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o yaml ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.M6LnRke0EM ++ cat /tmp/tmp.lTqHnXM1B8 ++ rm /tmp/tmp.M6LnRke0EM /tmp/tmp.lTqHnXM1B8 ++ return 0 + '[' true == null ']' + echo + wait_cluster_consistency some-name + local 
cluster_name=some-name + local wait_time=32 + retry=0 + sleep 7 + echo -n 'waiting for cluster readyness' waiting for cluster readyness++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.2qybf9mFNO +++ mktemp ++ local LAST_ERR=/tmp/tmp.Ce5bjvcq2b ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.2qybf9mFNO ++ cat /tmp/tmp.Ce5bjvcq2b ++ rm /tmp/tmp.2qybf9mFNO /tmp/tmp.Ce5bjvcq2b ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 1 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.wsoFwHv1wF +++ mktemp ++ local LAST_ERR=/tmp/tmp.Z3CsiNBpZS ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.wsoFwHv1wF ++ cat /tmp/tmp.Z3CsiNBpZS ++ rm /tmp/tmp.wsoFwHv1wF /tmp/tmp.Z3CsiNBpZS ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 2 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ex3qwUs03d +++ mktemp ++ local LAST_ERR=/tmp/tmp.8VHoKAJlyW ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.ex3qwUs03d ++ cat /tmp/tmp.8VHoKAJlyW ++ rm /tmp/tmp.ex3qwUs03d /tmp/tmp.8VHoKAJlyW ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 3 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.jM2NO5803L +++ mktemp ++ local LAST_ERR=/tmp/tmp.A9HUekDzXX ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.jM2NO5803L ++ cat /tmp/tmp.A9HUekDzXX ++ rm /tmp/tmp.jM2NO5803L /tmp/tmp.A9HUekDzXX ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 4 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Ejf2EvI4DH +++ mktemp ++ local LAST_ERR=/tmp/tmp.nfnh4U1E7q ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Ejf2EvI4DH ++ cat /tmp/tmp.nfnh4U1E7q ++ rm /tmp/tmp.Ejf2EvI4DH /tmp/tmp.nfnh4U1E7q ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 5 -ge 32 ']' + echo -n . 
.+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.2chmdvSh01 +++ mktemp ++ local LAST_ERR=/tmp/tmp.gcWHABqNg7 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.2chmdvSh01 ++ cat /tmp/tmp.gcWHABqNg7 ++ rm /tmp/tmp.2chmdvSh01 /tmp/tmp.gcWHABqNg7 ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 6 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.kJMHmy0XDz +++ mktemp ++ local LAST_ERR=/tmp/tmp.pWLcb2RTad ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.kJMHmy0XDz ++ cat /tmp/tmp.pWLcb2RTad ++ rm /tmp/tmp.kJMHmy0XDz /tmp/tmp.pWLcb2RTad ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 7 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ht9u0YJsTx +++ mktemp ++ local LAST_ERR=/tmp/tmp.zoFXzovLcj ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.ht9u0YJsTx ++ cat /tmp/tmp.zoFXzovLcj ++ rm /tmp/tmp.ht9u0YJsTx /tmp/tmp.zoFXzovLcj ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 8 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.2VQf70UXfQ +++ mktemp ++ local LAST_ERR=/tmp/tmp.Li57ZedkVP ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.2VQf70UXfQ ++ cat /tmp/tmp.Li57ZedkVP ++ rm /tmp/tmp.2VQf70UXfQ /tmp/tmp.Li57ZedkVP ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 9 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.6r2nV25KRE +++ mktemp ++ local LAST_ERR=/tmp/tmp.OVL4m6qOay ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.6r2nV25KRE ++ cat /tmp/tmp.OVL4m6qOay ++ rm /tmp/tmp.6r2nV25KRE /tmp/tmp.OVL4m6qOay ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 10 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.eKnAAXlU7R +++ mktemp ++ local LAST_ERR=/tmp/tmp.1RIVzeemsT ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.eKnAAXlU7R ++ cat /tmp/tmp.1RIVzeemsT ++ rm /tmp/tmp.eKnAAXlU7R /tmp/tmp.1RIVzeemsT ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 11 -ge 32 ']' + echo -n . 
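
Every `kubectl_bin` step in this loop expands to the same boilerplate: capture stdout/stderr into mktemp files, retry up to three times, then replay the captured output and clean up. Condensed from what the trace itself shows (only successful attempts appear in this log, so the shape of the retry back-off is assumed):

    kubectl_bin() {
        local LAST_OUT LAST_ERR exit_status=0 timeout=4
        LAST_OUT=$(mktemp)
        LAST_ERR=$(mktemp)
        for i in $(seq 0 2); do         # up to three attempts
            set +e
            kubectl "$@" >"$LAST_OUT" 2>"$LAST_ERR"
            exit_status=$?
            set -e
            if [ "$exit_status" -eq 0 ]; then
                break
            fi
            sleep "$timeout"            # assumed back-off between attempts
        done
        cat "$LAST_OUT"
        cat "$LAST_ERR"
        rm "$LAST_OUT" "$LAST_ERR"
        return "$exit_status"
    }
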
.+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.envBe6aFuJ +++ mktemp ++ local LAST_ERR=/tmp/tmp.jCQHOxUUlu ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.envBe6aFuJ ++ cat /tmp/tmp.jCQHOxUUlu ++ rm /tmp/tmp.envBe6aFuJ /tmp/tmp.jCQHOxUUlu ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 12 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.vtcJjT43W3 +++ mktemp ++ local LAST_ERR=/tmp/tmp.3bfz8lyYMw ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.vtcJjT43W3 ++ cat /tmp/tmp.3bfz8lyYMw ++ rm /tmp/tmp.vtcJjT43W3 /tmp/tmp.3bfz8lyYMw ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 13 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.6K6MfHWVfo +++ mktemp ++ local LAST_ERR=/tmp/tmp.xYGhxGCBuh ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.6K6MfHWVfo ++ cat /tmp/tmp.xYGhxGCBuh ++ rm /tmp/tmp.6K6MfHWVfo /tmp/tmp.xYGhxGCBuh ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 14 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.16JsKfVeGr +++ mktemp ++ local LAST_ERR=/tmp/tmp.826pmZLH5b ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.16JsKfVeGr ++ cat /tmp/tmp.826pmZLH5b ++ rm /tmp/tmp.16JsKfVeGr /tmp/tmp.826pmZLH5b ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 15 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.4WzmvnPI00 +++ mktemp ++ local LAST_ERR=/tmp/tmp.tIechxo01B ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.4WzmvnPI00 ++ cat /tmp/tmp.tIechxo01B ++ rm /tmp/tmp.4WzmvnPI00 /tmp/tmp.tIechxo01B ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 16 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.zzmtHoQscx +++ mktemp ++ local LAST_ERR=/tmp/tmp.SAnEN5qPdo ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.zzmtHoQscx ++ cat /tmp/tmp.SAnEN5qPdo ++ rm /tmp/tmp.zzmtHoQscx /tmp/tmp.SAnEN5qPdo ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 17 -ge 32 ']' + echo -n . 
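
The find/drop commands wrapped around these waits all go through run_mongos: look up the psmdb-client pod by label, then pipe a printf'd script into the mongo shell inside it. A condensed sketch matching the expansions in this trace (driver and extra-flag handling dropped):

    run_mongos() {
        local command=$1                # e.g. 'use myApp\n db.test.find()'
        local uri=$2                    # e.g. myApp:myPass@some-name-mongos.<namespace>
        local suffix=.svc.cluster.local
        local client_container
        client_container=$(kubectl get pods --selector=name=psmdb-client \
            -o 'jsonpath={.items[].metadata.name}')
        kubectl exec "$client_container" -- bash -c \
            "printf '$command\n' | mongo mongodb://$uri$suffix/admin"
    }
    # usage, as in this log:
    # run_mongos 'use myApp\n db.test.drop()' myApp:myPass@some-name-mongos.demand-backup-physical-sharded-17205
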
.+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.fgVzchYqVz +++ mktemp ++ local LAST_ERR=/tmp/tmp.HtZBS74HDa ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.fgVzchYqVz ++ cat /tmp/tmp.HtZBS74HDa ++ rm /tmp/tmp.fgVzchYqVz /tmp/tmp.HtZBS74HDa ++ return 0 + [[ ready == \r\e\a\d\y ]] + compare_mongos_cmd find myApp:myPass@some-name-mongos.demand-backup-physical-sharded-17205 -sharded + local command=find + local uri=myApp:myPass@some-name-mongos.demand-backup-physical-sharded-17205 + local postfix=-sharded + local suffix= + local database=myApp + local collection=test + run_mongos 'use myApp\n db.test.find()' myApp:myPass@some-name-mongos.demand-backup-physical-sharded-17205 mongodb '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-mongos.demand-backup-physical-sharded-17205 + egrep -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + local driver=mongodb + local suffix=.svc.cluster.local + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Z5PcM4myMk +++ mktemp ++ local LAST_ERR=/tmp/tmp.JzJ700gBHm ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Z5PcM4myMk ++ cat /tmp/tmp.JzJ700gBHm ++ rm /tmp/tmp.Z5PcM4myMk /tmp/tmp.JzJ700gBHm ++ return 0 + local client_container=psmdb-client-5f578b7f94-66hkw + local mongo_flag= + kubectl_bin exec psmdb-client-5f578b7f94-66hkw -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.demand-backup-physical-sharded-17205.svc.cluster.local/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.ipvgfwEqvt ++ mktemp + local LAST_ERR=/tmp/tmp.UxBFSy8lJi + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-5f578b7f94-66hkw -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.demand-backup-physical-sharded-17205.svc.cluster.local/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.ipvgfwEqvt + cat /tmp/tmp.UxBFSy8lJi + rm /tmp/tmp.ipvgfwEqvt /tmp/tmp.UxBFSy8lJi + return 0 + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1563/e2e-tests/demand-backup-physical-sharded/compare/find-sharded.json /tmp/tmp.XHwOP4Nfog/find-sharded + echo + set -o xtrace + check_exported_mongos_service_endpoint 34.133.190.96 + local host=34.133.190.96 ++ kubectl_bin get psmdb some-name '-o=jsonpath={.status.host}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.3JGiggIvAx +++ mktemp ++ local LAST_ERR=/tmp/tmp.x4ahkKvphT ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name '-o=jsonpath={.status.host}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.3JGiggIvAx ++ cat /tmp/tmp.x4ahkKvphT ++ rm /tmp/tmp.3JGiggIvAx /tmp/tmp.x4ahkKvphT ++ return 0 + '[' 
34.133.190.96 '!=' 34.133.190.96 ']' + echo 'drop collection' drop collection + run_mongos 'use myApp\n db.test.drop()' myApp:myPass@some-name-mongos.demand-backup-physical-sharded-17205 + local 'command=use myApp\n db.test.drop()' + local uri=myApp:myPass@some-name-mongos.demand-backup-physical-sharded-17205 + local driver=mongodb + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.FxeDS2bQnZ +++ mktemp ++ local LAST_ERR=/tmp/tmp.Bv0yREFVEb ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.FxeDS2bQnZ ++ cat /tmp/tmp.Bv0yREFVEb ++ rm /tmp/tmp.FxeDS2bQnZ /tmp/tmp.Bv0yREFVEb ++ return 0 + local client_container=psmdb-client-5f578b7f94-66hkw + local mongo_flag= + kubectl_bin exec psmdb-client-5f578b7f94-66hkw -- bash -c 'printf '\''use myApp\n db.test.drop()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.demand-backup-physical-sharded-17205.svc.cluster.local/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.lfBjXKA2MS ++ mktemp + local LAST_ERR=/tmp/tmp.O6BcC0amyU + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-5f578b7f94-66hkw -- bash -c 'printf '\''use myApp\n db.test.drop()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.demand-backup-physical-sharded-17205.svc.cluster.local/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.lfBjXKA2MS Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-mongos.demand-backup-physical-sharded-17205.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb Implicit session: session { "id" : UUID("a2de66f2-0309-417a-8a1f-60c59f636898") } Percona Server for MongoDB server version: v7.0.11-6 WARNING: shell and server versions do not match switched to db myApp true bye + cat /tmp/tmp.O6BcC0amyU + rm /tmp/tmp.lfBjXKA2MS /tmp/tmp.O6BcC0amyU + return 0 + echo 'check backup and restore -- gcp-cs' check backup and restore -- gcp-cs + run_restore backup-gcp-cs-sharded _restore_sharded + local backup_name=backup-gcp-cs-sharded + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1563/e2e-tests/demand-backup-physical-sharded/conf/restore.yml + /usr/bin/sed -e 's/name:/name: restore-backup-gcp-cs-sharded/' + /usr/bin/sed -e 's/backupName:/backupName: backup-gcp-cs-sharded/' + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.S2Fvw3Ojyp ++ mktemp + local LAST_ERR=/tmp/tmp.o85S1F4NhM + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.S2Fvw3Ojyp perconaservermongodbrestore.psmdb.percona.com/restore-backup-gcp-cs-sharded created + cat /tmp/tmp.o85S1F4NhM + rm /tmp/tmp.S2Fvw3Ojyp /tmp/tmp.o85S1F4NhM + return 0 + run_recovery_check backup-gcp-cs-sharded _restore_sharded + local backup_name=backup-gcp-cs-sharded + local compare_suffix=_restore_sharded + wait_restore backup-gcp-cs-sharded some-name requested 0 1200 + local backup_name=backup-gcp-cs-sharded + local cluster_name=some-name + local target_state=requested + local wait_cluster_consistency=0 + local wait_time=1200 + set +o xtrace waiting psmdb-restore/backup-gcp-cs-sharded to reach requested 
state................................................................................. + '[' 0 -eq 1 ']' + echo + compare_kubectl statefulset/some-name-rs0 _restore_sharded + local resource=statefulset/some-name-rs0 + local postfix=_restore_sharded + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1563/e2e-tests/demand-backup-physical-sharded/compare/statefulset_some-name-rs0_restore_sharded.yml + local new_result=/tmp/tmp.XHwOP4Nfog/statefulset_some-name-rs0.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1563/e2e-tests/demand-backup-physical-sharded/compare/statefulset_some-name-rs0_restore_sharded-oc.yml ']' + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("demand-backup-physical-sharded-17205", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. 
== "batch/v1beta1")) = "batch/v1" ' - + kubectl_bin get -o yaml statefulset/some-name-rs0 ++ mktemp + local LAST_OUT=/tmp/tmp.VpQQLmAYmx ++ mktemp + local LAST_ERR=/tmp/tmp.uVaH4Up2qk + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get -o yaml statefulset/some-name-rs0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.VpQQLmAYmx + cat /tmp/tmp.uVaH4Up2qk + rm /tmp/tmp.VpQQLmAYmx /tmp/tmp.uVaH4Up2qk + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.XHwOP4Nfog/statefulset_some-name-rs0.yml + version_gt 1.22 ++ echo '1.26 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.XHwOP4Nfog/statefulset_some-name-rs0.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.XHwOP4Nfog/statefulset_some-name-rs0.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1563/e2e-tests/demand-backup-physical-sharded/compare/statefulset_some-name-rs0_restore_sharded.yml == */cronjob* ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1563/e2e-tests/demand-backup-physical-sharded/compare/statefulset_some-name-rs0_restore_sharded.yml /tmp/tmp.XHwOP4Nfog/statefulset_some-name-rs0.yml + wait_restore backup-gcp-cs-sharded some-name ready 0 1800 + local backup_name=backup-gcp-cs-sharded + local cluster_name=some-name + local target_state=ready + local wait_cluster_consistency=0 + local wait_time=1800 + set +o xtrace waiting psmdb-restore/backup-gcp-cs-sharded to reach ready state........................................................................ + '[' 0 -eq 1 ']' + kubectl_bin get psmdb some-name -o yaml ++ mktemp + local LAST_OUT=/tmp/tmp.XzzdhT0fcD ++ mktemp + local LAST_ERR=/tmp/tmp.npVXv9wttf + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get psmdb some-name -o yaml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.XzzdhT0fcD apiVersion: psmdb.percona.com/v1 kind: PerconaServerMongoDB metadata: annotations: kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"psmdb.percona.com/v1","kind":"PerconaServerMongoDB","metadata":{"annotations":{},"name":"some-name","namespace":"demand-backup-physical-sharded-17205"},"spec":{"backup":{"enabled":true,"image":"perconalab/percona-server-mongodb-operator:main-backup","storages":{"aws-s3":{"s3":{"bucket":"operator-testing","credentialsSecret":"aws-s3-secret","insecureSkipTLSVerify":false,"prefix":"psmdb-demand-backup-physical-sharded","region":"us-east-1"},"type":"s3"},"azure-blob":{"azure":{"container":"operator-testing","credentialsSecret":"azure-secret","prefix":"psmdb-demand-backup-physical-sharded"},"type":"azure"},"gcp-cs":{"s3":{"bucket":"operator-testing","credentialsSecret":"gcp-cs-secret","endpointUrl":"https://storage.googleapis.com","insecureSkipTLSVerify":false,"prefix":"psmdb-demand-backup-physical-sharded","region":"us-east-1"},"type":"s3"},"minio":{"s3":{"bucket":"operator-testing","credentialsSecret":"minio-secret","endpointUrl":"http://minio-service:9000/","insecureSkipTLSVerify":false,"region":"us-east-1"},"type":"s3"}},"tasks":[{"compressionType":"gzip","enabled":true,"name":"weekly","schedule":"0 0 * * 0","storageName":"aws-s3"}]},"image":"perconalab/percona-server-mongodb-operator:main-mongod7.0","imagePullPolicy":"Always","replsets":[{"affinity":{"antiAffinityTopologyKey":"none"},"configuration":"operationProfiling:\n mode: slowOp\n slowOpThresholdMs: 100\nsecurity:\n enableEncryption: 
true\n redactClientLogData: false\nsetParameter:\n ttlMonitorSleepSecs: 60\n wiredTigerConcurrentReadTransactions: 128\n wiredTigerConcurrentWriteTransactions: 128\nstorage:\n engine: wiredTiger\n wiredTiger:\n collectionConfig:\n blockCompressor: snappy\n engineConfig:\n directoryForIndexes: false\n journalCompressor: snappy\n indexConfig:\n prefixCompression: true\n","expose":{"enabled":false,"exposeType":"ClusterIP"},"name":"rs0","resources":{"limits":{"cpu":"500m","memory":"1G"},"requests":{"cpu":"100m","memory":"0.1G"}},"size":3,"volumeSpec":{"persistentVolumeClaim":{"resources":{"requests":{"storage":"3Gi"}}}}}],"secrets":{"users":"some-users"},"sharding":{"configsvrReplSet":{"affinity":{"antiAffinityTopologyKey":"none"},"configuration":"operationProfiling:\n mode: slowOp\n slowOpThresholdMs: 100\nsecurity:\n enableEncryption: true\n redactClientLogData: false\nsetParameter:\n ttlMonitorSleepSecs: 60\n wiredTigerConcurrentReadTransactions: 128\n wiredTigerConcurrentWriteTransactions: 128\nstorage:\n engine: wiredTiger\n wiredTiger:\n collectionConfig:\n blockCompressor: snappy\n engineConfig:\n directoryForIndexes: false\n journalCompressor: snappy\n indexConfig:\n prefixCompression: true\n","expose":{"enabled":false,"exposeType":"ClusterIP"},"resources":{"limits":{"cpu":"500m","memory":"1G"},"requests":{"cpu":"100m","memory":"0.1G"}},"size":3,"volumeSpec":{"persistentVolumeClaim":{"resources":{"requests":{"storage":"3Gi"}}}}},"enabled":true,"mongos":{"affinity":{"antiAffinityTopologyKey":"none"},"expose":{"exposeType":"LoadBalancer"},"resources":{"limits":{"cpu":"500m","memory":"1G"},"requests":{"cpu":"100m","memory":"0.1G"}},"size":3}},"upgradeOptions":{"apply":"Never"}}} percona.com/resync-pbm: "true" creationTimestamp: "2024-06-04T12:25:27Z" generation: 2 name: some-name namespace: demand-backup-physical-sharded-17205 resourceVersion: "37574" uid: 2d7deafd-8d47-4747-9251-a15e50369740 spec: backup: enabled: true image: perconalab/percona-server-mongodb-operator:main-backup storages: aws-s3: s3: bucket: operator-testing credentialsSecret: aws-s3-secret insecureSkipTLSVerify: false prefix: psmdb-demand-backup-physical-sharded region: us-east-1 type: s3 azure-blob: azure: container: operator-testing credentialsSecret: azure-secret prefix: psmdb-demand-backup-physical-sharded type: azure gcp-cs: s3: bucket: operator-testing credentialsSecret: gcp-cs-secret endpointUrl: https://storage.googleapis.com insecureSkipTLSVerify: false prefix: psmdb-demand-backup-physical-sharded region: us-east-1 type: s3 minio: s3: bucket: operator-testing credentialsSecret: minio-secret endpointUrl: http://minio-service:9000/ insecureSkipTLSVerify: false region: us-east-1 type: s3 tasks: - compressionType: gzip enabled: true name: weekly schedule: 0 0 * * 0 storageName: aws-s3 crVersion: 1.17.0 image: perconalab/percona-server-mongodb-operator:main-mongod7.0 imagePullPolicy: Always replsets: - affinity: antiAffinityTopologyKey: none configuration: | operationProfiling: mode: slowOp slowOpThresholdMs: 100 security: enableEncryption: true redactClientLogData: false setParameter: ttlMonitorSleepSecs: 60 wiredTigerConcurrentReadTransactions: 128 wiredTigerConcurrentWriteTransactions: 128 storage: engine: wiredTiger wiredTiger: collectionConfig: blockCompressor: snappy engineConfig: directoryForIndexes: false journalCompressor: snappy indexConfig: prefixCompression: true expose: enabled: false exposeType: ClusterIP name: rs0 resources: limits: cpu: 500m memory: 1G requests: cpu: 100m memory: 0.1G size: 3 
volumeSpec: persistentVolumeClaim: resources: requests: storage: 3Gi secrets: users: some-users sharding: configsvrReplSet: affinity: antiAffinityTopologyKey: none configuration: | operationProfiling: mode: slowOp slowOpThresholdMs: 100 security: enableEncryption: true redactClientLogData: false setParameter: ttlMonitorSleepSecs: 60 wiredTigerConcurrentReadTransactions: 128 wiredTigerConcurrentWriteTransactions: 128 storage: engine: wiredTiger wiredTiger: collectionConfig: blockCompressor: snappy engineConfig: directoryForIndexes: false journalCompressor: snappy indexConfig: prefixCompression: true expose: enabled: false exposeType: ClusterIP resources: limits: cpu: 500m memory: 1G requests: cpu: 100m memory: 0.1G size: 3 volumeSpec: persistentVolumeClaim: resources: requests: storage: 3Gi enabled: true mongos: affinity: antiAffinityTopologyKey: none expose: exposeType: LoadBalancer resources: limits: cpu: 500m memory: 1G requests: cpu: 100m memory: 0.1G size: 3 upgradeOptions: apply: Never status: conditions: - lastTransitionTime: "2024-06-04T12:34:02Z" message: 'cfg: ready' reason: RSReady status: "True" type: ready - lastTransitionTime: "2024-06-04T12:34:02Z" status: "True" type: initializing - lastTransitionTime: "2024-06-04T12:34:35Z" message: 'rs0: ready' reason: RSReady status: "True" type: ready - lastTransitionTime: "2024-06-04T12:34:35Z" status: "True" type: initializing - lastTransitionTime: "2024-06-04T12:35:02Z" message: 'cfg: ready' reason: RSReady status: "True" type: ready - lastTransitionTime: "2024-06-04T12:35:02Z" status: "True" type: initializing - lastTransitionTime: "2024-06-04T12:35:04Z" message: 'rs0: ready' reason: RSReady status: "True" type: ready - lastTransitionTime: "2024-06-04T12:35:04Z" status: "True" type: initializing - lastTransitionTime: "2024-06-04T12:42:53Z" message: 'cfg: ready' reason: RSReady status: "True" type: ready - lastTransitionTime: "2024-06-04T12:42:53Z" status: "True" type: initializing - lastTransitionTime: "2024-06-04T12:43:21Z" message: 'rs0: ready' reason: RSReady status: "True" type: ready - lastTransitionTime: "2024-06-04T12:43:21Z" status: "True" type: initializing - lastTransitionTime: "2024-06-04T12:43:36Z" reason: MongosReady status: "True" type: ready - lastTransitionTime: "2024-06-04T12:43:59Z" status: "True" type: initializing - lastTransitionTime: "2024-06-04T12:44:39Z" message: 'rs0: ready' reason: RSReady status: "True" type: ready - lastTransitionTime: "2024-06-04T12:44:39Z" status: "True" type: initializing - lastTransitionTime: "2024-06-04T12:45:18Z" message: 'rs0: ready' reason: RSReady status: "True" type: ready - lastTransitionTime: "2024-06-04T12:45:18Z" status: "True" type: initializing - lastTransitionTime: "2024-06-04T12:45:58Z" message: 'rs0: ready' reason: RSReady status: "True" type: ready - lastTransitionTime: "2024-06-04T12:45:58Z" status: "True" type: initializing host: 34.133.190.96 mongoImage: perconalab/percona-server-mongodb-operator:main-mongod7.0 mongoVersion: 7.0.11-6 mongos: ready: 0 size: 0 status: initializing observedGeneration: 2 ready: 6 replsets: cfg: initialized: true ready: 3 size: 3 status: ready rs0: added_as_shard: true initialized: true ready: 3 size: 3 status: ready size: 6 state: initializing + cat /tmp/tmp.npVXv9wttf + rm /tmp/tmp.XzzdhT0fcD /tmp/tmp.npVXv9wttf + return 0 ++ kubectl_bin get psmdb some-name -o yaml ++ yq '.metadata.annotations."percona.com/resync-pbm"' +++ mktemp ++ local LAST_OUT=/tmp/tmp.hpzblkSlgz +++ mktemp ++ local LAST_ERR=/tmp/tmp.BIavt6qOUR ++ local 
exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o yaml ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.hpzblkSlgz ++ cat /tmp/tmp.BIavt6qOUR ++ rm /tmp/tmp.hpzblkSlgz /tmp/tmp.BIavt6qOUR ++ return 0 + '[' true == null ']' + echo + wait_cluster_consistency some-name + local cluster_name=some-name + local wait_time=32 + retry=0 + sleep 7 + echo -n 'waiting for cluster readyness' waiting for cluster readyness++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.mBTdxRx2AL +++ mktemp ++ local LAST_ERR=/tmp/tmp.Zthk61pSRn ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.mBTdxRx2AL ++ cat /tmp/tmp.Zthk61pSRn ++ rm /tmp/tmp.mBTdxRx2AL /tmp/tmp.Zthk61pSRn ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 1 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.8ZFjfyUPQW +++ mktemp ++ local LAST_ERR=/tmp/tmp.gpaqhYvpu7 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.8ZFjfyUPQW ++ cat /tmp/tmp.gpaqhYvpu7 ++ rm /tmp/tmp.8ZFjfyUPQW /tmp/tmp.gpaqhYvpu7 ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 2 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.dWP0zdvT8J +++ mktemp ++ local LAST_ERR=/tmp/tmp.gIHZiJLtfx ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.dWP0zdvT8J ++ cat /tmp/tmp.gIHZiJLtfx ++ rm /tmp/tmp.dWP0zdvT8J /tmp/tmp.gIHZiJLtfx ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 3 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.vcEv1x3Dum +++ mktemp ++ local LAST_ERR=/tmp/tmp.kKi7eLmNAH ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.vcEv1x3Dum ++ cat /tmp/tmp.kKi7eLmNAH ++ rm /tmp/tmp.vcEv1x3Dum /tmp/tmp.kKi7eLmNAH ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 4 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.lmCu9ANpat +++ mktemp ++ local LAST_ERR=/tmp/tmp.73b3dNejRN ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.lmCu9ANpat ++ cat /tmp/tmp.73b3dNejRN ++ rm /tmp/tmp.lmCu9ANpat /tmp/tmp.73b3dNejRN ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 5 -ge 32 ']' + echo -n . 
.+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.P7A3KzjVLr +++ mktemp ++ local LAST_ERR=/tmp/tmp.7b3ECraQbm ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.P7A3KzjVLr ++ cat /tmp/tmp.7b3ECraQbm ++ rm /tmp/tmp.P7A3KzjVLr /tmp/tmp.7b3ECraQbm ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 6 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.l8byNLw5Ra +++ mktemp ++ local LAST_ERR=/tmp/tmp.tOdLX7NXGC ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.l8byNLw5Ra ++ cat /tmp/tmp.tOdLX7NXGC ++ rm /tmp/tmp.l8byNLw5Ra /tmp/tmp.tOdLX7NXGC ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 7 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.kBHQ7y6loQ +++ mktemp ++ local LAST_ERR=/tmp/tmp.WAVtePzW8C ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.kBHQ7y6loQ ++ cat /tmp/tmp.WAVtePzW8C ++ rm /tmp/tmp.kBHQ7y6loQ /tmp/tmp.WAVtePzW8C ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 8 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.D0ZvgS6ZJo +++ mktemp ++ local LAST_ERR=/tmp/tmp.5cVxA1rwV1 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.D0ZvgS6ZJo ++ cat /tmp/tmp.5cVxA1rwV1 ++ rm /tmp/tmp.D0ZvgS6ZJo /tmp/tmp.5cVxA1rwV1 ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 9 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.AyDiSQsXsh +++ mktemp ++ local LAST_ERR=/tmp/tmp.RjJqUzG139 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.AyDiSQsXsh ++ cat /tmp/tmp.RjJqUzG139 ++ rm /tmp/tmp.AyDiSQsXsh /tmp/tmp.RjJqUzG139 ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 10 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ABpwxPmtNV +++ mktemp ++ local LAST_ERR=/tmp/tmp.pKI9Z5BsM2 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.ABpwxPmtNV ++ cat /tmp/tmp.pKI9Z5BsM2 ++ rm /tmp/tmp.ABpwxPmtNV /tmp/tmp.pKI9Z5BsM2 ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 11 -ge 32 ']' + echo -n . 
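
compare_kubectl, which gates each restore above, is a normalise-then-diff: dump the live object as YAML, strip every cluster-specific field with one long yq filter, then diff -u against a golden file. A heavily abbreviated sketch (the real filter in this trace deletes some thirty paths; three representatives are shown, and $compare_dir is a stand-in for the test's compare directory):

    compare_kubectl() {
        local resource=$1               # e.g. statefulset/some-name-rs0
        local postfix=$2                # e.g. _restore_sharded
        local expected=$compare_dir/${resource/\//_}$postfix.yml
        local actual=/tmp/actual.yml
        kubectl get -o yaml "$resource" \
            | yq eval 'del(.metadata.managedFields)
                | del(.metadata.resourceVersion)
                | del(.status)' - >"$actual"
        diff -u "$expected" "$actual"   # any diff fails the test under set -e
    }

The version_gt guard that follows it leans on bc's relational operators, which print 1 for a true relation and 0 otherwise: echo '1.26 >= 1.22' | bc -l prints 1, which is what the trace's '[' 1 -eq 1 ']' then tests.
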
.+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.6gJOdX7OOg +++ mktemp ++ local LAST_ERR=/tmp/tmp.owpLKj3lWd ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.6gJOdX7OOg ++ cat /tmp/tmp.owpLKj3lWd ++ rm /tmp/tmp.6gJOdX7OOg /tmp/tmp.owpLKj3lWd ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 12 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.CB88M9UvhT +++ mktemp ++ local LAST_ERR=/tmp/tmp.8QSAvHi9JY ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.CB88M9UvhT ++ cat /tmp/tmp.8QSAvHi9JY ++ rm /tmp/tmp.CB88M9UvhT /tmp/tmp.8QSAvHi9JY ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 13 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.1NAzSceJOi +++ mktemp ++ local LAST_ERR=/tmp/tmp.zSn2KAnH9E ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.1NAzSceJOi ++ cat /tmp/tmp.zSn2KAnH9E ++ rm /tmp/tmp.1NAzSceJOi /tmp/tmp.zSn2KAnH9E ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 14 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.udAEhzfCya +++ mktemp ++ local LAST_ERR=/tmp/tmp.26VLRRW5YS ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.udAEhzfCya ++ cat /tmp/tmp.26VLRRW5YS ++ rm /tmp/tmp.udAEhzfCya /tmp/tmp.26VLRRW5YS ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 15 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.K8f55HKsWa +++ mktemp ++ local LAST_ERR=/tmp/tmp.WMrHMyIYC1 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.K8f55HKsWa ++ cat /tmp/tmp.WMrHMyIYC1 ++ rm /tmp/tmp.K8f55HKsWa /tmp/tmp.WMrHMyIYC1 ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 16 -ge 32 ']' + echo -n . 
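
wait_restore itself is never expanded in this log (it runs under set +o xtrace), so the body below is an informed guess from its arguments and printed output: poll the restore CR's state until it reaches the target, then optionally hand off to the cluster-consistency wait. The polling interval and the exact status field are assumptions:

    wait_restore() {
        local backup_name=$1 cluster_name=$2 target_state=$3
        local wait_cluster_consistency=${4:-1} wait_time=${5:-780}
        local retry=0
        echo -n "waiting psmdb-restore/$backup_name to reach $target_state state"
        until [[ $(kubectl get psmdb-restore "restore-$backup_name" \
                -o 'jsonpath={.status.state}') == "$target_state" ]]; do
            let retry+=1
            if [ "$retry" -ge "$wait_time" ]; then
                return 1                # assumed timeout behaviour
            fi
            echo -n .
            sleep 1                     # assumed interval
        done
        echo
        if [ "$wait_cluster_consistency" -eq 1 ]; then
            wait_cluster_consistency "$cluster_name"
        fi
    }
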
.+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.HGaz9pMxox +++ mktemp ++ local LAST_ERR=/tmp/tmp.LioEZYSH0M ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.HGaz9pMxox ++ cat /tmp/tmp.LioEZYSH0M ++ rm /tmp/tmp.HGaz9pMxox /tmp/tmp.LioEZYSH0M ++ return 0 + [[ ready == \r\e\a\d\y ]] + compare_mongos_cmd find myApp:myPass@some-name-mongos.demand-backup-physical-sharded-17205 -sharded + local command=find + local uri=myApp:myPass@some-name-mongos.demand-backup-physical-sharded-17205 + local postfix=-sharded + local suffix= + local database=myApp + local collection=test + run_mongos 'use myApp\n db.test.find()' myApp:myPass@some-name-mongos.demand-backup-physical-sharded-17205 mongodb '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-mongos.demand-backup-physical-sharded-17205 + egrep -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + local driver=mongodb + local suffix=.svc.cluster.local + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.SujljnveXb +++ mktemp ++ local LAST_ERR=/tmp/tmp.i4kMHqWxOz ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.SujljnveXb ++ cat /tmp/tmp.i4kMHqWxOz ++ rm /tmp/tmp.SujljnveXb /tmp/tmp.i4kMHqWxOz ++ return 0 + local client_container=psmdb-client-5f578b7f94-66hkw + local mongo_flag= + kubectl_bin exec psmdb-client-5f578b7f94-66hkw -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.demand-backup-physical-sharded-17205.svc.cluster.local/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.V68hDBIjv6 ++ mktemp + local LAST_ERR=/tmp/tmp.r6Ep7Uon4i + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-5f578b7f94-66hkw -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.demand-backup-physical-sharded-17205.svc.cluster.local/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.V68hDBIjv6 + cat /tmp/tmp.r6Ep7Uon4i + rm /tmp/tmp.V68hDBIjv6 /tmp/tmp.r6Ep7Uon4i + return 0 + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1563/e2e-tests/demand-backup-physical-sharded/compare/find-sharded.json /tmp/tmp.XHwOP4Nfog/find-sharded + echo + set -o xtrace + check_exported_mongos_service_endpoint 34.133.190.96 + local host=34.133.190.96 ++ kubectl_bin get psmdb some-name '-o=jsonpath={.status.host}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.vxZioLBGOQ +++ mktemp ++ local LAST_ERR=/tmp/tmp.1I2ROitslO ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name '-o=jsonpath={.status.host}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.vxZioLBGOQ ++ cat /tmp/tmp.1I2ROitslO ++ rm /tmp/tmp.vxZioLBGOQ /tmp/tmp.1I2ROitslO ++ return 0 + '[' 
34.133.190.96 '!=' 34.133.190.96 ']' + echo 'drop collection' drop collection + run_mongos 'use myApp\n db.test.drop()' myApp:myPass@some-name-mongos.demand-backup-physical-sharded-17205 + local 'command=use myApp\n db.test.drop()' + local uri=myApp:myPass@some-name-mongos.demand-backup-physical-sharded-17205 + local driver=mongodb + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.8IKksVoOGE +++ mktemp ++ local LAST_ERR=/tmp/tmp.pOVCcXWqD5 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.8IKksVoOGE ++ cat /tmp/tmp.pOVCcXWqD5 ++ rm /tmp/tmp.8IKksVoOGE /tmp/tmp.pOVCcXWqD5 ++ return 0 + local client_container=psmdb-client-5f578b7f94-66hkw + local mongo_flag= + kubectl_bin exec psmdb-client-5f578b7f94-66hkw -- bash -c 'printf '\''use myApp\n db.test.drop()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.demand-backup-physical-sharded-17205.svc.cluster.local/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.4cYbGZT9y5 ++ mktemp + local LAST_ERR=/tmp/tmp.IpQrIM0iJt + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-5f578b7f94-66hkw -- bash -c 'printf '\''use myApp\n db.test.drop()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.demand-backup-physical-sharded-17205.svc.cluster.local/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.4cYbGZT9y5 Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-mongos.demand-backup-physical-sharded-17205.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb Implicit session: session { "id" : UUID("6d7ddcbf-73a0-4181-a6c4-3aa53dbc492a") } Percona Server for MongoDB server version: v7.0.11-6 WARNING: shell and server versions do not match switched to db myApp true bye + cat /tmp/tmp.IpQrIM0iJt + rm /tmp/tmp.4cYbGZT9y5 /tmp/tmp.IpQrIM0iJt + return 0 + echo 'check backup and restore -- azure-blob' check backup and restore -- azure-blob + run_restore backup-azure-blob-sharded _restore_sharded + local backup_name=backup-azure-blob-sharded + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1563/e2e-tests/demand-backup-physical-sharded/conf/restore.yml + /usr/bin/sed -e 's/name:/name: restore-backup-azure-blob-sharded/' + /usr/bin/sed -e 's/backupName:/backupName: backup-azure-blob-sharded/' + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.Ja9DJHsIdz ++ mktemp + local LAST_ERR=/tmp/tmp.31UhEuwbss + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Ja9DJHsIdz perconaservermongodbrestore.psmdb.percona.com/restore-backup-azure-blob-sharded created + cat /tmp/tmp.31UhEuwbss + rm /tmp/tmp.Ja9DJHsIdz /tmp/tmp.31UhEuwbss + return 0 + run_recovery_check backup-azure-blob-sharded _restore_sharded + local backup_name=backup-azure-blob-sharded + local compare_suffix=_restore_sharded + wait_restore backup-azure-blob-sharded some-name requested 0 1200 + local backup_name=backup-azure-blob-sharded + local cluster_name=some-name + local target_state=requested + local wait_cluster_consistency=0 + local wait_time=1200 + set +o xtrace waiting 
psmdb-restore/backup-azure-blob-sharded to reach requested state......................................................................... + '[' 0 -eq 1 ']' + echo + compare_kubectl statefulset/some-name-rs0 _restore_sharded + local resource=statefulset/some-name-rs0 + local postfix=_restore_sharded + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1563/e2e-tests/demand-backup-physical-sharded/compare/statefulset_some-name-rs0_restore_sharded.yml + local new_result=/tmp/tmp.XHwOP4Nfog/statefulset_some-name-rs0.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1563/e2e-tests/demand-backup-physical-sharded/compare/statefulset_some-name-rs0_restore_sharded-oc.yml ']' + kubectl_bin get -o yaml statefulset/some-name-rs0 + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("demand-backup-physical-sharded-17205", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. 
== "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.vpSR5EM35T ++ mktemp + local LAST_ERR=/tmp/tmp.rsNnz8BL6a + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get -o yaml statefulset/some-name-rs0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.vpSR5EM35T + cat /tmp/tmp.rsNnz8BL6a + rm /tmp/tmp.vpSR5EM35T /tmp/tmp.rsNnz8BL6a + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.XHwOP4Nfog/statefulset_some-name-rs0.yml + version_gt 1.22 ++ bc -l ++ echo '1.26 >= 1.22' + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.XHwOP4Nfog/statefulset_some-name-rs0.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.XHwOP4Nfog/statefulset_some-name-rs0.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1563/e2e-tests/demand-backup-physical-sharded/compare/statefulset_some-name-rs0_restore_sharded.yml == */cronjob* ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1563/e2e-tests/demand-backup-physical-sharded/compare/statefulset_some-name-rs0_restore_sharded.yml /tmp/tmp.XHwOP4Nfog/statefulset_some-name-rs0.yml + wait_restore backup-azure-blob-sharded some-name ready 0 1800 + local backup_name=backup-azure-blob-sharded + local cluster_name=some-name + local target_state=ready + local wait_cluster_consistency=0 + local wait_time=1800 + set +o xtrace waiting psmdb-restore/backup-azure-blob-sharded to reach ready state..................................................................... + '[' 0 -eq 1 ']' + kubectl_bin get psmdb some-name -o yaml ++ mktemp + local LAST_OUT=/tmp/tmp.3YIqjzRbZr ++ mktemp + local LAST_ERR=/tmp/tmp.Jf2bPRVfAU + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get psmdb some-name -o yaml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.3YIqjzRbZr apiVersion: psmdb.percona.com/v1 kind: PerconaServerMongoDB metadata: annotations: kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"psmdb.percona.com/v1","kind":"PerconaServerMongoDB","metadata":{"annotations":{},"name":"some-name","namespace":"demand-backup-physical-sharded-17205"},"spec":{"backup":{"enabled":true,"image":"perconalab/percona-server-mongodb-operator:main-backup","storages":{"aws-s3":{"s3":{"bucket":"operator-testing","credentialsSecret":"aws-s3-secret","insecureSkipTLSVerify":false,"prefix":"psmdb-demand-backup-physical-sharded","region":"us-east-1"},"type":"s3"},"azure-blob":{"azure":{"container":"operator-testing","credentialsSecret":"azure-secret","prefix":"psmdb-demand-backup-physical-sharded"},"type":"azure"},"gcp-cs":{"s3":{"bucket":"operator-testing","credentialsSecret":"gcp-cs-secret","endpointUrl":"https://storage.googleapis.com","insecureSkipTLSVerify":false,"prefix":"psmdb-demand-backup-physical-sharded","region":"us-east-1"},"type":"s3"},"minio":{"s3":{"bucket":"operator-testing","credentialsSecret":"minio-secret","endpointUrl":"http://minio-service:9000/","insecureSkipTLSVerify":false,"region":"us-east-1"},"type":"s3"}},"tasks":[{"compressionType":"gzip","enabled":true,"name":"weekly","schedule":"0 0 * * 0","storageName":"aws-s3"}]},"image":"perconalab/percona-server-mongodb-operator:main-mongod7.0","imagePullPolicy":"Always","replsets":[{"affinity":{"antiAffinityTopologyKey":"none"},"configuration":"operationProfiling:\n mode: slowOp\n slowOpThresholdMs: 100\nsecurity:\n enableEncryption: true\n redactClientLogData: 
false\nsetParameter:\n ttlMonitorSleepSecs: 60\n wiredTigerConcurrentReadTransactions: 128\n wiredTigerConcurrentWriteTransactions: 128\nstorage:\n engine: wiredTiger\n wiredTiger:\n collectionConfig:\n blockCompressor: snappy\n engineConfig:\n directoryForIndexes: false\n journalCompressor: snappy\n indexConfig:\n prefixCompression: true\n","expose":{"enabled":false,"exposeType":"ClusterIP"},"name":"rs0","resources":{"limits":{"cpu":"500m","memory":"1G"},"requests":{"cpu":"100m","memory":"0.1G"}},"size":3,"volumeSpec":{"persistentVolumeClaim":{"resources":{"requests":{"storage":"3Gi"}}}}}],"secrets":{"users":"some-users"},"sharding":{"configsvrReplSet":{"affinity":{"antiAffinityTopologyKey":"none"},"configuration":"operationProfiling:\n mode: slowOp\n slowOpThresholdMs: 100\nsecurity:\n enableEncryption: true\n redactClientLogData: false\nsetParameter:\n ttlMonitorSleepSecs: 60\n wiredTigerConcurrentReadTransactions: 128\n wiredTigerConcurrentWriteTransactions: 128\nstorage:\n engine: wiredTiger\n wiredTiger:\n collectionConfig:\n blockCompressor: snappy\n engineConfig:\n directoryForIndexes: false\n journalCompressor: snappy\n indexConfig:\n prefixCompression: true\n","expose":{"enabled":false,"exposeType":"ClusterIP"},"resources":{"limits":{"cpu":"500m","memory":"1G"},"requests":{"cpu":"100m","memory":"0.1G"}},"size":3,"volumeSpec":{"persistentVolumeClaim":{"resources":{"requests":{"storage":"3Gi"}}}}},"enabled":true,"mongos":{"affinity":{"antiAffinityTopologyKey":"none"},"expose":{"exposeType":"LoadBalancer"},"resources":{"limits":{"cpu":"500m","memory":"1G"},"requests":{"cpu":"100m","memory":"0.1G"}},"size":3}},"upgradeOptions":{"apply":"Never"}}} percona.com/resync-pbm: "true" creationTimestamp: "2024-06-04T12:25:27Z" generation: 2 name: some-name namespace: demand-backup-physical-sharded-17205 resourceVersion: "42181" uid: 2d7deafd-8d47-4747-9251-a15e50369740 spec: backup: enabled: true image: perconalab/percona-server-mongodb-operator:main-backup storages: aws-s3: s3: bucket: operator-testing credentialsSecret: aws-s3-secret insecureSkipTLSVerify: false prefix: psmdb-demand-backup-physical-sharded region: us-east-1 type: s3 azure-blob: azure: container: operator-testing credentialsSecret: azure-secret prefix: psmdb-demand-backup-physical-sharded type: azure gcp-cs: s3: bucket: operator-testing credentialsSecret: gcp-cs-secret endpointUrl: https://storage.googleapis.com insecureSkipTLSVerify: false prefix: psmdb-demand-backup-physical-sharded region: us-east-1 type: s3 minio: s3: bucket: operator-testing credentialsSecret: minio-secret endpointUrl: http://minio-service:9000/ insecureSkipTLSVerify: false region: us-east-1 type: s3 tasks: - compressionType: gzip enabled: true name: weekly schedule: 0 0 * * 0 storageName: aws-s3 crVersion: 1.17.0 image: perconalab/percona-server-mongodb-operator:main-mongod7.0 imagePullPolicy: Always replsets: - affinity: antiAffinityTopologyKey: none configuration: | operationProfiling: mode: slowOp slowOpThresholdMs: 100 security: enableEncryption: true redactClientLogData: false setParameter: ttlMonitorSleepSecs: 60 wiredTigerConcurrentReadTransactions: 128 wiredTigerConcurrentWriteTransactions: 128 storage: engine: wiredTiger wiredTiger: collectionConfig: blockCompressor: snappy engineConfig: directoryForIndexes: false journalCompressor: snappy indexConfig: prefixCompression: true expose: enabled: false exposeType: ClusterIP name: rs0 resources: limits: cpu: 500m memory: 1G requests: cpu: 100m memory: 0.1G size: 3 volumeSpec: persistentVolumeClaim: 
resources: requests: storage: 3Gi secrets: users: some-users sharding: configsvrReplSet: affinity: antiAffinityTopologyKey: none configuration: | operationProfiling: mode: slowOp slowOpThresholdMs: 100 security: enableEncryption: true redactClientLogData: false setParameter: ttlMonitorSleepSecs: 60 wiredTigerConcurrentReadTransactions: 128 wiredTigerConcurrentWriteTransactions: 128 storage: engine: wiredTiger wiredTiger: collectionConfig: blockCompressor: snappy engineConfig: directoryForIndexes: false journalCompressor: snappy indexConfig: prefixCompression: true expose: enabled: false exposeType: ClusterIP resources: limits: cpu: 500m memory: 1G requests: cpu: 100m memory: 0.1G size: 3 volumeSpec: persistentVolumeClaim: resources: requests: storage: 3Gi enabled: true mongos: affinity: antiAffinityTopologyKey: none expose: exposeType: LoadBalancer resources: limits: cpu: 500m memory: 1G requests: cpu: 100m memory: 0.1G size: 3 upgradeOptions: apply: Never status: conditions: - lastTransitionTime: "2024-06-04T12:44:39Z" message: 'rs0: ready' reason: RSReady status: "True" type: ready - lastTransitionTime: "2024-06-04T12:44:39Z" status: "True" type: initializing - lastTransitionTime: "2024-06-04T12:45:18Z" message: 'rs0: ready' reason: RSReady status: "True" type: ready - lastTransitionTime: "2024-06-04T12:45:18Z" status: "True" type: initializing - lastTransitionTime: "2024-06-04T12:45:58Z" message: 'rs0: ready' reason: RSReady status: "True" type: ready - lastTransitionTime: "2024-06-04T12:45:58Z" status: "True" type: initializing - lastTransitionTime: "2024-06-04T12:51:52Z" message: 'cfg: ready' reason: RSReady status: "True" type: ready - lastTransitionTime: "2024-06-04T12:51:52Z" status: "True" type: initializing - lastTransitionTime: "2024-06-04T12:52:14Z" message: 'rs0: ready' reason: RSReady status: "True" type: ready - lastTransitionTime: "2024-06-04T12:52:14Z" status: "True" type: initializing - lastTransitionTime: "2024-06-04T12:52:22Z" reason: MongosReady status: "True" type: ready - lastTransitionTime: "2024-06-04T12:52:36Z" status: "True" type: initializing - lastTransitionTime: "2024-06-04T12:53:13Z" message: 'rs0: ready' reason: RSReady status: "True" type: ready - lastTransitionTime: "2024-06-04T12:53:13Z" status: "True" type: initializing - lastTransitionTime: "2024-06-04T12:53:20Z" message: 'cfg: ready' reason: RSReady status: "True" type: ready - lastTransitionTime: "2024-06-04T12:53:20Z" status: "True" type: initializing - lastTransitionTime: "2024-06-04T12:53:46Z" message: 'rs0: ready' reason: RSReady status: "True" type: ready - lastTransitionTime: "2024-06-04T12:53:46Z" status: "True" type: initializing - lastTransitionTime: "2024-06-04T12:54:20Z" message: 'rs0: ready' reason: RSReady status: "True" type: ready - lastTransitionTime: "2024-06-04T12:54:20Z" status: "True" type: initializing host: 34.133.190.96 mongoImage: perconalab/percona-server-mongodb-operator:main-mongod7.0 mongoVersion: 7.0.11-6 mongos: ready: 0 size: 0 status: initializing observedGeneration: 2 ready: 6 replsets: cfg: initialized: true ready: 3 size: 3 status: ready rs0: added_as_shard: true initialized: true ready: 3 size: 3 status: ready size: 6 state: initializing + cat /tmp/tmp.Jf2bPRVfAU + rm /tmp/tmp.3YIqjzRbZr /tmp/tmp.Jf2bPRVfAU + return 0 ++ kubectl_bin get psmdb some-name -o yaml ++ yq '.metadata.annotations."percona.com/resync-pbm"' +++ mktemp ++ local LAST_OUT=/tmp/tmp.GhazSG7JNJ +++ mktemp ++ local LAST_ERR=/tmp/tmp.IIh0a53Jri ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ 
for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o yaml ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.GhazSG7JNJ ++ cat /tmp/tmp.IIh0a53Jri ++ rm /tmp/tmp.GhazSG7JNJ /tmp/tmp.IIh0a53Jri ++ return 0 + '[' true == null ']' + echo + wait_cluster_consistency some-name + local cluster_name=some-name + local wait_time=32 + retry=0 + sleep 7 + echo -n 'waiting for cluster readyness' waiting for cluster readyness++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.HuiHjF9MkJ +++ mktemp ++ local LAST_ERR=/tmp/tmp.fKszObt1Ew ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.HuiHjF9MkJ ++ cat /tmp/tmp.fKszObt1Ew ++ rm /tmp/tmp.HuiHjF9MkJ /tmp/tmp.fKszObt1Ew ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 1 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.uH0CIlYd3Q +++ mktemp ++ local LAST_ERR=/tmp/tmp.EQb2ABSLvK ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.uH0CIlYd3Q ++ cat /tmp/tmp.EQb2ABSLvK ++ rm /tmp/tmp.uH0CIlYd3Q /tmp/tmp.EQb2ABSLvK ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 2 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.jUMJawiR3P +++ mktemp ++ local LAST_ERR=/tmp/tmp.HLlyXXZtxV ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.jUMJawiR3P ++ cat /tmp/tmp.HLlyXXZtxV ++ rm /tmp/tmp.jUMJawiR3P /tmp/tmp.HLlyXXZtxV ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 3 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.MOjrhdhz2j +++ mktemp ++ local LAST_ERR=/tmp/tmp.9IvqXWSCsR ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.MOjrhdhz2j ++ cat /tmp/tmp.9IvqXWSCsR ++ rm /tmp/tmp.MOjrhdhz2j /tmp/tmp.9IvqXWSCsR ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 4 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.XXmUUDF62s +++ mktemp ++ local LAST_ERR=/tmp/tmp.7bGcZgGD7V ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.XXmUUDF62s ++ cat /tmp/tmp.7bGcZgGD7V ++ rm /tmp/tmp.XXmUUDF62s /tmp/tmp.7bGcZgGD7V ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 5 -ge 32 ']' + echo -n . 
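Every kubectl invocation in this trace runs through the same wrapper pattern: stdout and stderr are captured into mktemp files, the command is retried up to three times (seq 0 2), both buffers are printed, and the temp files are removed. A minimal sketch of such a wrapper, reconstructed from the trace alone (the function body below is an assumption, not the test suite's actual source; the 4-second pause matches the timeout=4 variable in the trace, but its exact use is inferred):

    kubectl_bin() {
        local LAST_OUT LAST_ERR exit_status=0 timeout=4
        LAST_OUT=$(mktemp)
        LAST_ERR=$(mktemp)
        for i in $(seq 0 2); do                  # up to three attempts, as seen in the trace
            set +e
            kubectl "$@" >"$LAST_OUT" 2>"$LAST_ERR"
            exit_status=$?
            set -e
            if [ "$exit_status" -eq 0 ]; then    # stop retrying on success
                break
            fi
            sleep "$timeout"                     # assumed back-off between attempts
        done
        cat "$LAST_OUT"
        cat "$LAST_ERR" >&2
        rm -f "$LAST_OUT" "$LAST_ERR"
        return "$exit_status"
    }
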
.+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.9iVcwAKoIa +++ mktemp ++ local LAST_ERR=/tmp/tmp.TugzL2HWz0 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.9iVcwAKoIa ++ cat /tmp/tmp.TugzL2HWz0 ++ rm /tmp/tmp.9iVcwAKoIa /tmp/tmp.TugzL2HWz0 ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 6 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.SPzin6dmYB +++ mktemp ++ local LAST_ERR=/tmp/tmp.vmelja01Rz ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.SPzin6dmYB ++ cat /tmp/tmp.vmelja01Rz ++ rm /tmp/tmp.SPzin6dmYB /tmp/tmp.vmelja01Rz ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 7 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.DK0JgIMPLZ +++ mktemp ++ local LAST_ERR=/tmp/tmp.WSEmLGaCJ2 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.DK0JgIMPLZ ++ cat /tmp/tmp.WSEmLGaCJ2 ++ rm /tmp/tmp.DK0JgIMPLZ /tmp/tmp.WSEmLGaCJ2 ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 8 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.0aiK7KGNzw +++ mktemp ++ local LAST_ERR=/tmp/tmp.42jDzGYQxk ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.0aiK7KGNzw ++ cat /tmp/tmp.42jDzGYQxk ++ rm /tmp/tmp.0aiK7KGNzw /tmp/tmp.42jDzGYQxk ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 9 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.o3Zzvdg2Bi +++ mktemp ++ local LAST_ERR=/tmp/tmp.qLVwW5thf3 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.o3Zzvdg2Bi ++ cat /tmp/tmp.qLVwW5thf3 ++ rm /tmp/tmp.o3Zzvdg2Bi /tmp/tmp.qLVwW5thf3 ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 10 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.G6SSSQvr1I +++ mktemp ++ local LAST_ERR=/tmp/tmp.xbSm51JAAw ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.G6SSSQvr1I ++ cat /tmp/tmp.xbSm51JAAw ++ rm /tmp/tmp.G6SSSQvr1I /tmp/tmp.xbSm51JAAw ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 11 -ge 32 ']' + echo -n . 
.+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ErOQ4lO782 +++ mktemp ++ local LAST_ERR=/tmp/tmp.lBaPYiYzFW ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.ErOQ4lO782 ++ cat /tmp/tmp.lBaPYiYzFW ++ rm /tmp/tmp.ErOQ4lO782 /tmp/tmp.lBaPYiYzFW ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 12 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.7F2idRwtSV +++ mktemp ++ local LAST_ERR=/tmp/tmp.KtBpt3lQlc ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.7F2idRwtSV ++ cat /tmp/tmp.KtBpt3lQlc ++ rm /tmp/tmp.7F2idRwtSV /tmp/tmp.KtBpt3lQlc ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 13 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.JHKkNgSIor +++ mktemp ++ local LAST_ERR=/tmp/tmp.Xyos6mQybm ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.JHKkNgSIor ++ cat /tmp/tmp.Xyos6mQybm ++ rm /tmp/tmp.JHKkNgSIor /tmp/tmp.Xyos6mQybm ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 14 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.JyuJsEuWBb +++ mktemp ++ local LAST_ERR=/tmp/tmp.dcyGubrZch ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.JyuJsEuWBb ++ cat /tmp/tmp.dcyGubrZch ++ rm /tmp/tmp.JyuJsEuWBb /tmp/tmp.dcyGubrZch ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 15 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Pnfc7jUQ3j +++ mktemp ++ local LAST_ERR=/tmp/tmp.CqghsHfyNP ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Pnfc7jUQ3j ++ cat /tmp/tmp.CqghsHfyNP ++ rm /tmp/tmp.Pnfc7jUQ3j /tmp/tmp.CqghsHfyNP ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 16 -ge 32 ']' + echo -n . 
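The dots above are a bounded poll: the script reads .status.state from the psmdb object every 10 seconds and gives up after 32 attempts (a little over five minutes, on top of an initial 7-second sleep). One more iteration below and the state finally flips from initializing to ready. A sketch of the polling logic implied by the trace (the timeout handling is an assumption):

    wait_cluster_consistency() {
        local cluster_name=$1
        local wait_time=32
        local retry=0
        sleep 7                                  # initial grace period, as in the trace
        echo -n 'waiting for cluster readyness'  # spelling kept as it appears in the log
        until [[ $(kubectl get psmdb "$cluster_name" -o 'jsonpath={.status.state}') == "ready" ]]; do
            let retry+=1
            if [ "$retry" -ge "$wait_time" ]; then
                echo "cluster $cluster_name never reached ready state" >&2   # assumed failure path
                return 1
            fi
            echo -n .
            sleep 10
        done
        echo
    }
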
.+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.qUfRZwhgeK +++ mktemp ++ local LAST_ERR=/tmp/tmp.4qHjBg0lHS ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.qUfRZwhgeK ++ cat /tmp/tmp.4qHjBg0lHS ++ rm /tmp/tmp.qUfRZwhgeK /tmp/tmp.4qHjBg0lHS ++ return 0 + [[ ready == \r\e\a\d\y ]] + compare_mongos_cmd find myApp:myPass@some-name-mongos.demand-backup-physical-sharded-17205 -sharded + local command=find + local uri=myApp:myPass@some-name-mongos.demand-backup-physical-sharded-17205 + local postfix=-sharded + local suffix= + local database=myApp + local collection=test + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' + run_mongos 'use myApp\n db.test.find()' myApp:myPass@some-name-mongos.demand-backup-physical-sharded-17205 mongodb '' + egrep -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-mongos.demand-backup-physical-sharded-17205 + local driver=mongodb + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.VKQdnkqjWn +++ mktemp ++ local LAST_ERR=/tmp/tmp.CC064yH3HL ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.VKQdnkqjWn ++ cat /tmp/tmp.CC064yH3HL ++ rm /tmp/tmp.VKQdnkqjWn /tmp/tmp.CC064yH3HL ++ return 0 + local client_container=psmdb-client-5f578b7f94-66hkw + local mongo_flag= + kubectl_bin exec psmdb-client-5f578b7f94-66hkw -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.demand-backup-physical-sharded-17205.svc.cluster.local/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.X6sr84mIrQ ++ mktemp + local LAST_ERR=/tmp/tmp.OyA02gYDxp + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-5f578b7f94-66hkw -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.demand-backup-physical-sharded-17205.svc.cluster.local/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.X6sr84mIrQ + cat /tmp/tmp.OyA02gYDxp + rm /tmp/tmp.X6sr84mIrQ /tmp/tmp.OyA02gYDxp + return 0 + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1563/e2e-tests/demand-backup-physical-sharded/compare/find-sharded.json /tmp/tmp.XHwOP4Nfog/find-sharded + echo + set -o xtrace + check_exported_mongos_service_endpoint 34.133.190.96 + local host=34.133.190.96 ++ kubectl_bin get psmdb some-name '-o=jsonpath={.status.host}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.CGanMcMZyI +++ mktemp ++ local LAST_ERR=/tmp/tmp.kAX2xSTyJ6 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name '-o=jsonpath={.status.host}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.CGanMcMZyI ++ cat /tmp/tmp.kAX2xSTyJ6 ++ rm /tmp/tmp.CGanMcMZyI /tmp/tmp.kAX2xSTyJ6 ++ return 0 + '[' 
34.133.190.96 '!=' 34.133.190.96 ']' + echo 'drop collection' drop collection + run_mongos 'use myApp\n db.test.drop()' myApp:myPass@some-name-mongos.demand-backup-physical-sharded-17205 + local 'command=use myApp\n db.test.drop()' + local uri=myApp:myPass@some-name-mongos.demand-backup-physical-sharded-17205 + local driver=mongodb + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.fGsD9MLTwU +++ mktemp ++ local LAST_ERR=/tmp/tmp.xbwE8ZmHFl ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.fGsD9MLTwU ++ cat /tmp/tmp.xbwE8ZmHFl ++ rm /tmp/tmp.fGsD9MLTwU /tmp/tmp.xbwE8ZmHFl ++ return 0 + local client_container=psmdb-client-5f578b7f94-66hkw + local mongo_flag= + kubectl_bin exec psmdb-client-5f578b7f94-66hkw -- bash -c 'printf '\''use myApp\n db.test.drop()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.demand-backup-physical-sharded-17205.svc.cluster.local/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.xQ1EdJl7ZK ++ mktemp + local LAST_ERR=/tmp/tmp.oEg4IWamJI + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-5f578b7f94-66hkw -- bash -c 'printf '\''use myApp\n db.test.drop()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.demand-backup-physical-sharded-17205.svc.cluster.local/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.xQ1EdJl7ZK Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-mongos.demand-backup-physical-sharded-17205.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb Implicit session: session { "id" : UUID("f8fb5194-e219-4869-9e28-bbe5bf289844") } Percona Server for MongoDB server version: v7.0.11-6 WARNING: shell and server versions do not match switched to db myApp true bye + cat /tmp/tmp.oEg4IWamJI + rm /tmp/tmp.xQ1EdJl7ZK /tmp/tmp.oEg4IWamJI + return 0 + echo 'check backup and restore -- minio' check backup and restore -- minio ++ get_backup_dest backup-minio-sharded ++ local backup_name=backup-minio-sharded ++ kubectl_bin get psmdb-backup backup-minio-sharded -o 'jsonpath={.status.destination}' ++ sed 's|azure://||' ++ sed 's|s3://||' ++ sed -e 's/.json$//' +++ mktemp ++ local LAST_OUT=/tmp/tmp.cjfnYRPFNK +++ mktemp ++ local LAST_ERR=/tmp/tmp.Ir6tgYkgw1 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb-backup backup-minio-sharded -o 'jsonpath={.status.destination}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.cjfnYRPFNK ++ cat /tmp/tmp.Ir6tgYkgw1 ++ rm /tmp/tmp.cjfnYRPFNK /tmp/tmp.Ir6tgYkgw1 ++ return 0 + backup_dest_minio=operator-testing/2024-06-04T12:30:31Z + run_restore backup-minio-sharded _restore_sharded + local backup_name=backup-minio-sharded + /usr/bin/sed -e 's/name:/name: restore-backup-minio-sharded/' + /usr/bin/sed -e 's/backupName:/backupName: backup-minio-sharded/' + kubectl_bin apply -f - + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1563/e2e-tests/demand-backup-physical-sharded/conf/restore.yml ++ mktemp + local LAST_OUT=/tmp/tmp.cCrZzSClxW ++ mktemp + local LAST_ERR=/tmp/tmp.XTaU9H8AB6 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl 
apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.cCrZzSClxW perconaservermongodbrestore.psmdb.percona.com/restore-backup-minio-sharded created + cat /tmp/tmp.XTaU9H8AB6 + rm /tmp/tmp.cCrZzSClxW /tmp/tmp.XTaU9H8AB6 + return 0 + run_recovery_check backup-minio-sharded _restore_sharded + local backup_name=backup-minio-sharded + local compare_suffix=_restore_sharded + wait_restore backup-minio-sharded some-name requested 0 1200 + local backup_name=backup-minio-sharded + local cluster_name=some-name + local target_state=requested + local wait_cluster_consistency=0 + local wait_time=1200 + set +o xtrace waiting psmdb-restore/backup-minio-sharded to reach requested state................................................................ + '[' 0 -eq 1 ']' + echo + compare_kubectl statefulset/some-name-rs0 _restore_sharded + local resource=statefulset/some-name-rs0 + local postfix=_restore_sharded + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1563/e2e-tests/demand-backup-physical-sharded/compare/statefulset_some-name-rs0_restore_sharded.yml + local new_result=/tmp/tmp.XHwOP4Nfog/statefulset_some-name-rs0.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1563/e2e-tests/demand-backup-physical-sharded/compare/statefulset_some-name-rs0_restore_sharded-oc.yml ']' + kubectl_bin get -o yaml statefulset/some-name-rs0 + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("demand-backup-physical-sharded-17205", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. 
== "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.bE8Q5ZBl4U ++ mktemp + local LAST_ERR=/tmp/tmp.u2OlXYZnq6 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get -o yaml statefulset/some-name-rs0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.bE8Q5ZBl4U + cat /tmp/tmp.u2OlXYZnq6 + rm /tmp/tmp.bE8Q5ZBl4U /tmp/tmp.u2OlXYZnq6 + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.XHwOP4Nfog/statefulset_some-name-rs0.yml + version_gt 1.22 ++ echo '1.26 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.XHwOP4Nfog/statefulset_some-name-rs0.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.XHwOP4Nfog/statefulset_some-name-rs0.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1563/e2e-tests/demand-backup-physical-sharded/compare/statefulset_some-name-rs0_restore_sharded.yml == */cronjob* ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1563/e2e-tests/demand-backup-physical-sharded/compare/statefulset_some-name-rs0_restore_sharded.yml /tmp/tmp.XHwOP4Nfog/statefulset_some-name-rs0.yml + wait_restore backup-minio-sharded some-name ready 0 1800 + local backup_name=backup-minio-sharded + local cluster_name=some-name + local target_state=ready + local wait_cluster_consistency=0 + local wait_time=1800 + set +o xtrace waiting psmdb-restore/backup-minio-sharded to reach ready state................................................................ + '[' 0 -eq 1 ']' + kubectl_bin get psmdb some-name -o yaml ++ mktemp + local LAST_OUT=/tmp/tmp.PRtAciXdKu ++ mktemp + local LAST_ERR=/tmp/tmp.MAO9Hq6FLW + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get psmdb some-name -o yaml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.PRtAciXdKu apiVersion: psmdb.percona.com/v1 kind: PerconaServerMongoDB metadata: annotations: kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"psmdb.percona.com/v1","kind":"PerconaServerMongoDB","metadata":{"annotations":{},"name":"some-name","namespace":"demand-backup-physical-sharded-17205"},"spec":{"backup":{"enabled":true,"image":"perconalab/percona-server-mongodb-operator:main-backup","storages":{"aws-s3":{"s3":{"bucket":"operator-testing","credentialsSecret":"aws-s3-secret","insecureSkipTLSVerify":false,"prefix":"psmdb-demand-backup-physical-sharded","region":"us-east-1"},"type":"s3"},"azure-blob":{"azure":{"container":"operator-testing","credentialsSecret":"azure-secret","prefix":"psmdb-demand-backup-physical-sharded"},"type":"azure"},"gcp-cs":{"s3":{"bucket":"operator-testing","credentialsSecret":"gcp-cs-secret","endpointUrl":"https://storage.googleapis.com","insecureSkipTLSVerify":false,"prefix":"psmdb-demand-backup-physical-sharded","region":"us-east-1"},"type":"s3"},"minio":{"s3":{"bucket":"operator-testing","credentialsSecret":"minio-secret","endpointUrl":"http://minio-service:9000/","insecureSkipTLSVerify":false,"region":"us-east-1"},"type":"s3"}},"tasks":[{"compressionType":"gzip","enabled":true,"name":"weekly","schedule":"0 0 * * 0","storageName":"aws-s3"}]},"image":"perconalab/percona-server-mongodb-operator:main-mongod7.0","imagePullPolicy":"Always","replsets":[{"affinity":{"antiAffinityTopologyKey":"none"},"configuration":"operationProfiling:\n mode: slowOp\n slowOpThresholdMs: 100\nsecurity:\n enableEncryption: true\n redactClientLogData: false\nsetParameter:\n 
ttlMonitorSleepSecs: 60\n wiredTigerConcurrentReadTransactions: 128\n wiredTigerConcurrentWriteTransactions: 128\nstorage:\n engine: wiredTiger\n wiredTiger:\n collectionConfig:\n blockCompressor: snappy\n engineConfig:\n directoryForIndexes: false\n journalCompressor: snappy\n indexConfig:\n prefixCompression: true\n","expose":{"enabled":false,"exposeType":"ClusterIP"},"name":"rs0","resources":{"limits":{"cpu":"500m","memory":"1G"},"requests":{"cpu":"100m","memory":"0.1G"}},"size":3,"volumeSpec":{"persistentVolumeClaim":{"resources":{"requests":{"storage":"3Gi"}}}}}],"secrets":{"users":"some-users"},"sharding":{"configsvrReplSet":{"affinity":{"antiAffinityTopologyKey":"none"},"configuration":"operationProfiling:\n mode: slowOp\n slowOpThresholdMs: 100\nsecurity:\n enableEncryption: true\n redactClientLogData: false\nsetParameter:\n ttlMonitorSleepSecs: 60\n wiredTigerConcurrentReadTransactions: 128\n wiredTigerConcurrentWriteTransactions: 128\nstorage:\n engine: wiredTiger\n wiredTiger:\n collectionConfig:\n blockCompressor: snappy\n engineConfig:\n directoryForIndexes: false\n journalCompressor: snappy\n indexConfig:\n prefixCompression: true\n","expose":{"enabled":false,"exposeType":"ClusterIP"},"resources":{"limits":{"cpu":"500m","memory":"1G"},"requests":{"cpu":"100m","memory":"0.1G"}},"size":3,"volumeSpec":{"persistentVolumeClaim":{"resources":{"requests":{"storage":"3Gi"}}}}},"enabled":true,"mongos":{"affinity":{"antiAffinityTopologyKey":"none"},"expose":{"exposeType":"LoadBalancer"},"resources":{"limits":{"cpu":"500m","memory":"1G"},"requests":{"cpu":"100m","memory":"0.1G"}},"size":3}},"upgradeOptions":{"apply":"Never"}}} percona.com/resync-pbm: "true" creationTimestamp: "2024-06-04T12:25:27Z" generation: 2 name: some-name namespace: demand-backup-physical-sharded-17205 resourceVersion: "46558" uid: 2d7deafd-8d47-4747-9251-a15e50369740 spec: backup: enabled: true image: perconalab/percona-server-mongodb-operator:main-backup storages: aws-s3: s3: bucket: operator-testing credentialsSecret: aws-s3-secret insecureSkipTLSVerify: false prefix: psmdb-demand-backup-physical-sharded region: us-east-1 type: s3 azure-blob: azure: container: operator-testing credentialsSecret: azure-secret prefix: psmdb-demand-backup-physical-sharded type: azure gcp-cs: s3: bucket: operator-testing credentialsSecret: gcp-cs-secret endpointUrl: https://storage.googleapis.com insecureSkipTLSVerify: false prefix: psmdb-demand-backup-physical-sharded region: us-east-1 type: s3 minio: s3: bucket: operator-testing credentialsSecret: minio-secret endpointUrl: http://minio-service:9000/ insecureSkipTLSVerify: false region: us-east-1 type: s3 tasks: - compressionType: gzip enabled: true name: weekly schedule: 0 0 * * 0 storageName: aws-s3 crVersion: 1.17.0 image: perconalab/percona-server-mongodb-operator:main-mongod7.0 imagePullPolicy: Always replsets: - affinity: antiAffinityTopologyKey: none configuration: | operationProfiling: mode: slowOp slowOpThresholdMs: 100 security: enableEncryption: true redactClientLogData: false setParameter: ttlMonitorSleepSecs: 60 wiredTigerConcurrentReadTransactions: 128 wiredTigerConcurrentWriteTransactions: 128 storage: engine: wiredTiger wiredTiger: collectionConfig: blockCompressor: snappy engineConfig: directoryForIndexes: false journalCompressor: snappy indexConfig: prefixCompression: true expose: enabled: false exposeType: ClusterIP name: rs0 resources: limits: cpu: 500m memory: 1G requests: cpu: 100m memory: 0.1G size: 3 volumeSpec: persistentVolumeClaim: resources: requests: 
storage: 3Gi secrets: users: some-users sharding: configsvrReplSet: affinity: antiAffinityTopologyKey: none configuration: | operationProfiling: mode: slowOp slowOpThresholdMs: 100 security: enableEncryption: true redactClientLogData: false setParameter: ttlMonitorSleepSecs: 60 wiredTigerConcurrentReadTransactions: 128 wiredTigerConcurrentWriteTransactions: 128 storage: engine: wiredTiger wiredTiger: collectionConfig: blockCompressor: snappy engineConfig: directoryForIndexes: false journalCompressor: snappy indexConfig: prefixCompression: true expose: enabled: false exposeType: ClusterIP resources: limits: cpu: 500m memory: 1G requests: cpu: 100m memory: 0.1G size: 3 volumeSpec: persistentVolumeClaim: resources: requests: storage: 3Gi enabled: true mongos: affinity: antiAffinityTopologyKey: none expose: exposeType: LoadBalancer resources: limits: cpu: 500m memory: 1G requests: cpu: 100m memory: 0.1G size: 3 upgradeOptions: apply: Never status: conditions: - lastTransitionTime: "2024-06-04T12:53:46Z" message: 'rs0: ready' reason: RSReady status: "True" type: ready - lastTransitionTime: "2024-06-04T12:53:46Z" status: "True" type: initializing - lastTransitionTime: "2024-06-04T12:54:20Z" message: 'rs0: ready' reason: RSReady status: "True" type: ready - lastTransitionTime: "2024-06-04T12:54:20Z" status: "True" type: initializing - lastTransitionTime: "2024-06-04T13:00:10Z" message: 'cfg: ready' reason: RSReady status: "True" type: ready - lastTransitionTime: "2024-06-04T13:00:10Z" status: "True" type: initializing - lastTransitionTime: "2024-06-04T13:00:25Z" message: 'rs0: ready' reason: RSReady status: "True" type: ready - lastTransitionTime: "2024-06-04T13:00:25Z" status: "True" type: initializing - lastTransitionTime: "2024-06-04T13:00:41Z" reason: MongosReady status: "True" type: ready - lastTransitionTime: "2024-06-04T13:00:58Z" status: "True" type: initializing - lastTransitionTime: "2024-06-04T13:01:39Z" message: 'rs0: ready' reason: RSReady status: "True" type: ready - lastTransitionTime: "2024-06-04T13:01:39Z" status: "True" type: initializing - lastTransitionTime: "2024-06-04T13:02:12Z" message: 'rs0: ready' reason: RSReady status: "True" type: ready - lastTransitionTime: "2024-06-04T13:02:12Z" status: "True" type: initializing - lastTransitionTime: "2024-06-04T13:02:18Z" message: 'cfg: ready' reason: RSReady status: "True" type: ready - lastTransitionTime: "2024-06-04T13:02:18Z" status: "True" type: initializing - lastTransitionTime: "2024-06-04T13:02:52Z" message: 'cfg: ready' reason: RSReady status: "True" type: ready - lastTransitionTime: "2024-06-04T13:02:52Z" status: "True" type: initializing - lastTransitionTime: "2024-06-04T13:02:59Z" message: 'rs0: ready' reason: RSReady status: "True" type: ready - lastTransitionTime: "2024-06-04T13:02:59Z" status: "True" type: initializing host: 34.133.190.96 mongoImage: perconalab/percona-server-mongodb-operator:main-mongod7.0 mongoVersion: 7.0.11-6 mongos: ready: 0 size: 0 status: initializing observedGeneration: 2 ready: 6 replsets: cfg: initialized: true ready: 3 size: 3 status: ready rs0: added_as_shard: true initialized: true ready: 3 size: 3 status: ready size: 6 state: initializing + cat /tmp/tmp.MAO9Hq6FLW + rm /tmp/tmp.PRtAciXdKu /tmp/tmp.MAO9Hq6FLW + return 0 ++ kubectl_bin get psmdb some-name -o yaml ++ yq '.metadata.annotations."percona.com/resync-pbm"' +++ mktemp ++ local LAST_OUT=/tmp/tmp.idHaVc3inY +++ mktemp ++ local LAST_ERR=/tmp/tmp.SYPMYGlsK1 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 
2)' ++ set +e ++ kubectl get psmdb some-name -o yaml ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.idHaVc3inY ++ cat /tmp/tmp.SYPMYGlsK1 ++ rm /tmp/tmp.idHaVc3inY /tmp/tmp.SYPMYGlsK1 ++ return 0 + '[' true == null ']' + echo + wait_cluster_consistency some-name + local cluster_name=some-name + local wait_time=32 + retry=0 + sleep 7 + echo -n 'waiting for cluster readyness' waiting for cluster readyness++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.yy5xJawNec +++ mktemp ++ local LAST_ERR=/tmp/tmp.8sPBaPocKa ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.yy5xJawNec ++ cat /tmp/tmp.8sPBaPocKa ++ rm /tmp/tmp.yy5xJawNec /tmp/tmp.8sPBaPocKa ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 1 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Cginn1oJBW +++ mktemp ++ local LAST_ERR=/tmp/tmp.wizT6e25bO ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Cginn1oJBW ++ cat /tmp/tmp.wizT6e25bO ++ rm /tmp/tmp.Cginn1oJBW /tmp/tmp.wizT6e25bO ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 2 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.kMocN4sVwx +++ mktemp ++ local LAST_ERR=/tmp/tmp.QAmtQv7ckB ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.kMocN4sVwx ++ cat /tmp/tmp.QAmtQv7ckB ++ rm /tmp/tmp.kMocN4sVwx /tmp/tmp.QAmtQv7ckB ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 3 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.gB5K1WlKnC +++ mktemp ++ local LAST_ERR=/tmp/tmp.alxxdhKeJI ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.gB5K1WlKnC ++ cat /tmp/tmp.alxxdhKeJI ++ rm /tmp/tmp.gB5K1WlKnC /tmp/tmp.alxxdhKeJI ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 4 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.oJFZzPh3Ez +++ mktemp ++ local LAST_ERR=/tmp/tmp.INIncczne7 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.oJFZzPh3Ez ++ cat /tmp/tmp.INIncczne7 ++ rm /tmp/tmp.oJFZzPh3Ez /tmp/tmp.INIncczne7 ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 5 -ge 32 ']' + echo -n . 
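Before this readiness loop began, the script extracted the percona.com/resync-pbm annotation from the psmdb object with yq and compared it against null; the CR dump above shows the operator set it to "true" after the physical restore, which is what the test expects before it waits for the cluster to settle. The gate reduces to something like this (the error message is a hypothetical stand-in):

    resync=$(kubectl get psmdb some-name -o yaml \
        | yq '.metadata.annotations."percona.com/resync-pbm"')
    if [ "$resync" == "null" ]; then
        echo "percona.com/resync-pbm was not set after the physical restore" >&2
        exit 1
    fi
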
.+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.5ahKrEXBHA +++ mktemp ++ local LAST_ERR=/tmp/tmp.8CPIGWt0bj ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.5ahKrEXBHA ++ cat /tmp/tmp.8CPIGWt0bj ++ rm /tmp/tmp.5ahKrEXBHA /tmp/tmp.8CPIGWt0bj ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 6 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.NHrqeJ1MPj +++ mktemp ++ local LAST_ERR=/tmp/tmp.AFj4sBK7bW ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.NHrqeJ1MPj ++ cat /tmp/tmp.AFj4sBK7bW ++ rm /tmp/tmp.NHrqeJ1MPj /tmp/tmp.AFj4sBK7bW ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 7 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.v06WtFvXO8 +++ mktemp ++ local LAST_ERR=/tmp/tmp.QveOuqjD9i ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.v06WtFvXO8 ++ cat /tmp/tmp.QveOuqjD9i ++ rm /tmp/tmp.v06WtFvXO8 /tmp/tmp.QveOuqjD9i ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 8 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.LNUQsAQi3i +++ mktemp ++ local LAST_ERR=/tmp/tmp.yAcfquodDK ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.LNUQsAQi3i ++ cat /tmp/tmp.yAcfquodDK ++ rm /tmp/tmp.LNUQsAQi3i /tmp/tmp.yAcfquodDK ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 9 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.dVH1wFGChu +++ mktemp ++ local LAST_ERR=/tmp/tmp.TXr4rX5VqT ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.dVH1wFGChu ++ cat /tmp/tmp.TXr4rX5VqT ++ rm /tmp/tmp.dVH1wFGChu /tmp/tmp.TXr4rX5VqT ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 10 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.TJ2gw9Snbv +++ mktemp ++ local LAST_ERR=/tmp/tmp.d4OxKCj9tM ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.TJ2gw9Snbv ++ cat /tmp/tmp.d4OxKCj9tM ++ rm /tmp/tmp.TJ2gw9Snbv /tmp/tmp.d4OxKCj9tM ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 11 -ge 32 ']' + echo -n . 
.+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.5ZALjAnaZx +++ mktemp ++ local LAST_ERR=/tmp/tmp.BBwbZVEWGE ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.5ZALjAnaZx ++ cat /tmp/tmp.BBwbZVEWGE ++ rm /tmp/tmp.5ZALjAnaZx /tmp/tmp.BBwbZVEWGE ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 12 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.WPIHHHYQQz +++ mktemp ++ local LAST_ERR=/tmp/tmp.MlTWzr72Y1 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.WPIHHHYQQz ++ cat /tmp/tmp.MlTWzr72Y1 ++ rm /tmp/tmp.WPIHHHYQQz /tmp/tmp.MlTWzr72Y1 ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 13 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.DuMhDfUhA7 +++ mktemp ++ local LAST_ERR=/tmp/tmp.KMhi2WObGu ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.DuMhDfUhA7 ++ cat /tmp/tmp.KMhi2WObGu ++ rm /tmp/tmp.DuMhDfUhA7 /tmp/tmp.KMhi2WObGu ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 14 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.4tey4vIhGq +++ mktemp ++ local LAST_ERR=/tmp/tmp.geaEaAeWub ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.4tey4vIhGq ++ cat /tmp/tmp.geaEaAeWub ++ rm /tmp/tmp.4tey4vIhGq /tmp/tmp.geaEaAeWub ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 15 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.hRmaPx6Yzo +++ mktemp ++ local LAST_ERR=/tmp/tmp.sMEVixdEAB ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.hRmaPx6Yzo ++ cat /tmp/tmp.sMEVixdEAB ++ rm /tmp/tmp.hRmaPx6Yzo /tmp/tmp.sMEVixdEAB ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 16 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.pgQzBHBl3P +++ mktemp ++ local LAST_ERR=/tmp/tmp.5CeinL1mJx ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.pgQzBHBl3P ++ cat /tmp/tmp.5CeinL1mJx ++ rm /tmp/tmp.pgQzBHBl3P /tmp/tmp.5CeinL1mJx ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 17 -ge 32 ']' + echo -n . 
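As soon as the state flips to ready in the next iteration, the data check re-runs db.test.find() through mongos and diffs the output against a golden file. Two normalizations visible in the trace keep that diff stable: egrep -v drops shell and driver noise lines, and sed masks volatile tokens (ObjectIds are stripped, and the numeric namespace suffix in .svc hostnames becomes -xxx.svc). Unrolled into plain commands, with this run's pod and namespace substituted in, it is roughly:

    ns=demand-backup-physical-sharded-17205
    client_pod=psmdb-client-5f578b7f94-66hkw
    kubectl exec "$client_pod" -- bash -c \
        'printf "use myApp\n db.test.find()\n" | mongo "mongodb://myApp:myPass@some-name-mongos.'"$ns"'.svc.cluster.local/admin"' \
        | egrep -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match' \
        | sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' \
        > /tmp/find-sharded
    diff e2e-tests/demand-backup-physical-sharded/compare/find-sharded.json /tmp/find-sharded   # any difference fails the test under set -e
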
.+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.qUjRn8rihT +++ mktemp ++ local LAST_ERR=/tmp/tmp.KvXUW9f7hl ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.qUjRn8rihT ++ cat /tmp/tmp.KvXUW9f7hl ++ rm /tmp/tmp.qUjRn8rihT /tmp/tmp.KvXUW9f7hl ++ return 0 + [[ ready == \r\e\a\d\y ]] + compare_mongos_cmd find myApp:myPass@some-name-mongos.demand-backup-physical-sharded-17205 -sharded + local command=find + local uri=myApp:myPass@some-name-mongos.demand-backup-physical-sharded-17205 + local postfix=-sharded + local suffix= + local database=myApp + local collection=test + egrep -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' + run_mongos 'use myApp\n db.test.find()' myApp:myPass@some-name-mongos.demand-backup-physical-sharded-17205 mongodb '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-mongos.demand-backup-physical-sharded-17205 + local driver=mongodb + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.FmalB0ihAR +++ mktemp ++ local LAST_ERR=/tmp/tmp.h0s98BzNYr ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.FmalB0ihAR ++ cat /tmp/tmp.h0s98BzNYr ++ rm /tmp/tmp.FmalB0ihAR /tmp/tmp.h0s98BzNYr ++ return 0 + local client_container=psmdb-client-5f578b7f94-66hkw + local mongo_flag= + kubectl_bin exec psmdb-client-5f578b7f94-66hkw -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.demand-backup-physical-sharded-17205.svc.cluster.local/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.m786nEQjLL ++ mktemp + local LAST_ERR=/tmp/tmp.aiQYK0II3B + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-5f578b7f94-66hkw -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.demand-backup-physical-sharded-17205.svc.cluster.local/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.m786nEQjLL + cat /tmp/tmp.aiQYK0II3B + rm /tmp/tmp.m786nEQjLL /tmp/tmp.aiQYK0II3B + return 0 + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1563/e2e-tests/demand-backup-physical-sharded/compare/find-sharded.json /tmp/tmp.XHwOP4Nfog/find-sharded + echo + set -o xtrace + destroy demand-backup-physical-sharded-17205 + local namespace=demand-backup-physical-sharded-17205 + local ignore_logs=true + desc 'destroy cluster/operator and all other resources' + set +o xtrace ----------------------------------------------------------------------------------- destroy cluster/operator and all other resources ----------------------------------------------------------------------------------- + '[' true == false ']' + delete_crd + desc 'get and delete old CRDs and RBAC' + set +o xtrace 
-----------------------------------------------------------------------------------
get and delete old CRDs and RBAC
-----------------------------------------------------------------------------------
+ kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1563/deploy/crd.yaml --ignore-not-found --wait=false ++ mktemp + local LAST_OUT=/tmp/tmp.I3jur5GFtb ++ mktemp + local LAST_ERR=/tmp/tmp.6oTwt7U15f + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1563/deploy/crd.yaml --ignore-not-found --wait=false + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.I3jur5GFtb
customresourcedefinition.apiextensions.k8s.io "perconaservermongodbbackups.psmdb.percona.com" deleted
customresourcedefinition.apiextensions.k8s.io "perconaservermongodbrestores.psmdb.percona.com" deleted
customresourcedefinition.apiextensions.k8s.io "perconaservermongodbs.psmdb.percona.com" deleted
+ cat /tmp/tmp.6oTwt7U15f + rm /tmp/tmp.I3jur5GFtb /tmp/tmp.6oTwt7U15f + return 0
++ yq eval .metadata.name /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1563/deploy/crd.yaml ++ grep -v '\-\-\-'
+ for crd_name in '$(yq eval '\''.metadata.name'\'' "${src_dir}/deploy/crd.yaml" | grep -v '\''\-\-\-'\'')' + grep -v NAMESPACE + kubectl get perconaservermongodbbackups.psmdb.percona.com --all-namespaces -o wide + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbbackups.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"'
+ kubectl patch perconaservermongodbbackups.psmdb.percona.com -n demand-backup-physical-sharded-17205 backup-azure-blob-sharded --type=merge -p '{"metadata":{"finalizers":[]}}'
perconaservermongodbbackup.psmdb.percona.com/backup-azure-blob-sharded patched
+ kubectl patch perconaservermongodbbackups.psmdb.percona.com -n demand-backup-physical-sharded-17205 backup-minio-sharded --type=merge -p '{"metadata":{"finalizers":[]}}'
perconaservermongodbbackup.psmdb.percona.com/backup-minio-sharded patched
+ kubectl_bin wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.vtJTjkfW1K ++ mktemp + local LAST_ERR=/tmp/tmp.6xCuIc4Job + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.vtJTjkfW1K
customresourcedefinition.apiextensions.k8s.io/perconaservermongodbbackups.psmdb.percona.com condition met
+ cat /tmp/tmp.6xCuIc4Job + rm /tmp/tmp.vtJTjkfW1K /tmp/tmp.6xCuIc4Job + return 0
+ for crd_name in '$(yq eval '\''.metadata.name'\'' "${src_dir}/deploy/crd.yaml" | grep -v '\''\-\-\-'\'')' + kubectl get perconaservermongodbrestores.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbrestores.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"'
error: the server doesn't have a resource type "perconaservermongodbrestores"
+ kubectl patch perconaservermongodbrestores.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}'
error: the server doesn't have a resource type "perconaservermongodbrestores"
+ : + kubectl_bin wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.3uEXGXe3Oc ++ mktemp + local LAST_ERR=/tmp/tmp.KbXF7XYaBq + local exit_status=0 + local timeout=4 ++ seq 0 2
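Deletion of the backup objects above can hang because each perconaservermongodbbackup carries a finalizer that only the (by now deleted) operator would normally remove, so the harness patches every object's finalizer list to empty before waiting for the CRD itself to disappear. The same idea as a standalone helper, sketched under the assumption that GNU xargs feeds the namespace and name columns as $0 and $1 to the inner shell (strip_finalizers is a hypothetical name):

# Hypothetical helper mirroring the xargs/patch pipeline in the trace:
# clear finalizers on every object of a CRD so deletion can complete
# even though the operator that would honor them is already gone.
strip_finalizers() {
    local crd=$1   # e.g. perconaservermongodbbackups.psmdb.percona.com
    kubectl get "$crd" --all-namespaces -o wide \
        | grep -v NAMESPACE \
        | xargs -L 1 sh -xc \
            "kubectl patch $crd -n \$0 \$1 --type=merge -p '{\"metadata\":{\"finalizers\":[]}}'" \
        || :   # tolerate NotFound and empty lists, as the '+ :' in the trace does
    kubectl wait --for=delete crd "$crd"
}

Incidentally, the odd "-n sh" patch attempts in the log are a side effect of this pipeline: when the get returns no rows, xargs still runs sh once with no arguments, so $0 expands to the shell's own name. The trailing ": " makes that failure non-fatal.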
+ for i in '$(seq 0 2)' + set +e + kubectl wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.3uEXGXe3Oc + cat /tmp/tmp.KbXF7XYaBq + rm /tmp/tmp.3uEXGXe3Oc /tmp/tmp.KbXF7XYaBq + return 0
+ for crd_name in '$(yq eval '\''.metadata.name'\'' "${src_dir}/deploy/crd.yaml" | grep -v '\''\-\-\-'\'')' + kubectl get perconaservermongodbs.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbs.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"'
error: the server doesn't have a resource type "perconaservermongodbs"
+ kubectl patch perconaservermongodbs.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}'
error: the server doesn't have a resource type "perconaservermongodbs"
+ : + kubectl_bin wait --for=delete crd perconaservermongodbs.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.Cjjjk3y0jO ++ mktemp + local LAST_ERR=/tmp/tmp.wweGtAfRbD + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=delete crd perconaservermongodbs.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Cjjjk3y0jO + cat /tmp/tmp.wweGtAfRbD + rm /tmp/tmp.Cjjjk3y0jO /tmp/tmp.wweGtAfRbD + return 0
+ local rbac_yaml=rbac.yaml + '[' -n psmdb-operator ']' + rbac_yaml=cw-rbac.yaml
+ kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1563/deploy/cw-rbac.yaml --ignore-not-found ++ mktemp + local LAST_OUT=/tmp/tmp.gAEYo6JBe9 ++ mktemp + local LAST_ERR=/tmp/tmp.pHayManbem + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1563/deploy/cw-rbac.yaml --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.gAEYo6JBe9
clusterrole.rbac.authorization.k8s.io "percona-server-mongodb-operator" deleted
clusterrolebinding.rbac.authorization.k8s.io "service-account-percona-server-mongodb-operator" deleted
+ cat /tmp/tmp.pHayManbem + rm /tmp/tmp.gAEYo6JBe9 /tmp/tmp.pHayManbem + return 0
+ kubectl_bin delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml + : + '[' -n '' ']' + '[' -n psmdb-operator ']'
+ kubectl_bin delete --grace-period=0 --force=true namespace demand-backup-physical-sharded-17205 + rm -rf /tmp/tmp.XHwOP4Nfog + kubectl_bin delete --grace-period=0 --force=true namespace psmdb-operator ++ mktemp ++ mktemp + local LAST_OUT=/tmp/tmp.6lWPaQGoJ1 + local LAST_OUT=/tmp/tmp.NZODi4F4jN ++ mktemp ++ mktemp + local LAST_ERR=/tmp/tmp.YDjwjhkIRp + local LAST_ERR=/tmp/tmp.IwShXMYcEJ + local exit_status=0 + local exit_status=0 + local timeout=4 + local timeout=4 ++ seq 0 2 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete --grace-period=0 --force=true namespace demand-backup-physical-sharded-17205 + for i in '$(seq 0 2)' + set +e + kubectl delete --grace-period=0 --force=true namespace psmdb-operator
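Teardown ends by force-deleting the test namespace and the operator namespace in parallel, which is why the two xtrace streams interleave in the final block above. A condensed sketch of that last step, with cleanup_namespaces as an illustrative name:

# Illustrative final-cleanup helper; --grace-period=0 --force skips
# graceful pod shutdown, acceptable for disposable CI namespaces but
# not something to run against a shared cluster.
cleanup_namespaces() {
    local test_ns=$1 operator_ns=$2 scratch_dir=$3
    rm -rf "$scratch_dir"   # per-run temp dir, e.g. a mktemp -d result
    kubectl delete --grace-period=0 --force=true namespace "$test_ns" &
    kubectl delete --grace-period=0 --force=true namespace "$operator_ns" &
    wait   # both deletions run concurrently, producing interleaved output
}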