++ echo 'Log: /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1393/e2e-tests/logs/demand-backup-sharded.log' Log: /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1393/e2e-tests/logs/demand-backup-sharded.log ++ '[' -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1393/e2e-tests/conf/cloud-secret.yml ']' ++ SKIP_BACKUPS_TO_AWS_GCP_AZURE= ++ oc get projects ++ kubectl get nodes ++ grep '^minikube' +++ kubectl version -o json +++ jq -r .serverVersion.gitVersion +++ grep '\-eks\-' WARNING: version difference between client (1.30) and server (1.26) exceeds the supported minor version skew of +/-1 ++ '[' ']' ++ EKS=0 +++ kubectl version -o json +++ jq -r .serverVersion.gitVersion +++ grep gke WARNING: version difference between client (1.30) and server (1.26) exceeds the supported minor version skew of +/-1 ++ '[' v1.26.15-gke.1243000 ']' ++ GKE=1 +++ /usr/bin/sed -r 's/[^0-9.]+//g' +++ kubectl version -o json +++ jq -r '.serverVersion.major + "." + .serverVersion.minor' WARNING: version difference between client (1.30) and server (1.26) exceeds the supported minor version skew of +/-1 ++ KUBE_VERSION=1.26 + set_debug + [[ 1 == 1 ]] + set -o xtrace + create_infra demand-backup-sharded-18059 + local ns=demand-backup-sharded-18059 + delete_crd + desc 'get and delete old CRDs and RBAC' + set +o xtrace ----------------------------------------------------------------------------------- get and delete old CRDs and RBAC ----------------------------------------------------------------------------------- + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1393/deploy/crd.yaml --ignore-not-found --wait=false ++ mktemp + local LAST_OUT=/tmp/tmp.t9S7xlSUzp ++ mktemp + local LAST_ERR=/tmp/tmp.5g9A516EHc + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1393/deploy/crd.yaml --ignore-not-found --wait=false + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.t9S7xlSUzp customresourcedefinition.apiextensions.k8s.io "perconaservermongodbbackups.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbrestores.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbs.psmdb.percona.com" deleted + cat /tmp/tmp.5g9A516EHc + rm /tmp/tmp.t9S7xlSUzp /tmp/tmp.5g9A516EHc + return 0 ++ yq eval .metadata.name /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1393/deploy/crd.yaml ++ grep -v '\-\-\-' + for crd_name in '$(yq eval '\''.metadata.name'\'' "${src_dir}/deploy/crd.yaml" | grep -v '\''\-\-\-'\'')' + kubectl get perconaservermongodbbackups.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbbackups.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' E0510 10:40:46.316335 32089 memcache.go:287] couldn't get resource list for metrics.k8s.io/v1beta1: the server is currently unable to handle the request E0510 10:40:46.584852 32089 memcache.go:121] couldn't get resource list for metrics.k8s.io/v1beta1: the server is currently unable to handle the request E0510 10:40:46.710954 32089 memcache.go:121] couldn't get resource list for metrics.k8s.io/v1beta1: the server is currently unable to handle the request E0510 10:40:46.821086 32089 memcache.go:121] couldn't get resource list for metrics.k8s.io/v1beta1: the server is currently unable to handle the request + kubectl patch perconaservermongodbbackups.psmdb.percona.com -n 
demand-backup-sharded-29843 backup-aws-s3 --type=merge -p '{"metadata":{"finalizers":[]}}' E0510 10:40:48.498243 32381 memcache.go:287] couldn't get resource list for metrics.k8s.io/v1beta1: the server is currently unable to handle the request E0510 10:40:48.772258 32381 memcache.go:121] couldn't get resource list for metrics.k8s.io/v1beta1: the server is currently unable to handle the request E0510 10:40:48.910159 32381 memcache.go:121] couldn't get resource list for metrics.k8s.io/v1beta1: the server is currently unable to handle the request E0510 10:40:49.154389 32381 memcache.go:121] couldn't get resource list for metrics.k8s.io/v1beta1: the server is currently unable to handle the request perconaservermongodbbackup.psmdb.percona.com/backup-aws-s3 patched + kubectl patch perconaservermongodbbackups.psmdb.percona.com -n demand-backup-sharded-29843 backup-gcp-cs --type=merge -p '{"metadata":{"finalizers":[]}}' E0510 10:40:51.262971 32632 memcache.go:287] couldn't get resource list for metrics.k8s.io/v1beta1: the server is currently unable to handle the request E0510 10:40:51.535023 32632 memcache.go:121] couldn't get resource list for metrics.k8s.io/v1beta1: the server is currently unable to handle the request E0510 10:40:51.686133 32632 memcache.go:121] couldn't get resource list for metrics.k8s.io/v1beta1: the server is currently unable to handle the request E0510 10:40:51.848722 32632 memcache.go:121] couldn't get resource list for metrics.k8s.io/v1beta1: the server is currently unable to handle the request perconaservermongodbbackup.psmdb.percona.com/backup-gcp-cs patched + kubectl patch perconaservermongodbbackups.psmdb.percona.com -n demand-backup-sharded-29843 backup-minio --type=merge -p '{"metadata":{"finalizers":[]}}' E0510 10:40:53.387888 374 memcache.go:287] couldn't get resource list for metrics.k8s.io/v1beta1: the server is currently unable to handle the request E0510 10:40:53.611004 374 memcache.go:121] couldn't get resource list for metrics.k8s.io/v1beta1: the server is currently unable to handle the request E0510 10:40:53.726630 374 memcache.go:121] couldn't get resource list for metrics.k8s.io/v1beta1: the server is currently unable to handle the request E0510 10:40:53.838650 374 memcache.go:121] couldn't get resource list for metrics.k8s.io/v1beta1: the server is currently unable to handle the request perconaservermongodbbackup.psmdb.percona.com/backup-minio patched + kubectl_bin wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.jun2dQ0h2H ++ mktemp + local LAST_ERR=/tmp/tmp.N8fy12a55X + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.jun2dQ0h2H customresourcedefinition.apiextensions.k8s.io/perconaservermongodbbackups.psmdb.percona.com condition met + cat /tmp/tmp.N8fy12a55X + rm /tmp/tmp.jun2dQ0h2H /tmp/tmp.N8fy12a55X + return 0 + for crd_name in '$(yq eval '\''.metadata.name'\'' "${src_dir}/deploy/crd.yaml" | grep -v '\''\-\-\-'\'')' + grep -v NAMESPACE + kubectl get perconaservermongodbrestores.psmdb.percona.com --all-namespaces -o wide + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbrestores.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' E0510 10:40:59.430104 1122 memcache.go:287] couldn't get resource list for metrics.k8s.io/v1beta1: the server is currently unable to handle the request 
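The patch loop above is how the suite unblocks CRD deletion: each leftover perconaservermongodbbackups object still carries a finalizer, so the earlier `kubectl delete -f deploy/crd.yaml --wait=false` cannot finish until every finalizer is cleared. Condensed to its essence (resource and CRD names exactly as in this run; the repeated metrics.k8s.io errors come from the aggregated metrics API being unavailable and are unrelated to the patch itself):

# list every backup object, drop the header line, then clear finalizers one object at a time
kubectl get perconaservermongodbbackups.psmdb.percona.com --all-namespaces -o wide \
  | grep -v NAMESPACE \
  | xargs -L 1 sh -xc \
      'kubectl patch perconaservermongodbbackups.psmdb.percona.com -n $0 $1 \
         --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"'
# with no finalizers left, the delete issued earlier can complete
kubectl wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com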
E0510 10:40:59.653509 1122 memcache.go:121] couldn't get resource list for metrics.k8s.io/v1beta1: the server is currently unable to handle the request E0510 10:40:59.770999 1122 memcache.go:121] couldn't get resource list for metrics.k8s.io/v1beta1: the server is currently unable to handle the request E0510 10:40:59.881093 1122 memcache.go:121] couldn't get resource list for metrics.k8s.io/v1beta1: the server is currently unable to handle the request E0510 10:40:59.990325 1122 memcache.go:121] couldn't get resource list for metrics.k8s.io/v1beta1: the server is currently unable to handle the request error: the server doesn't have a resource type "perconaservermongodbrestores" + kubectl patch perconaservermongodbrestores.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' E0510 10:41:01.100951 1353 memcache.go:287] couldn't get resource list for metrics.k8s.io/v1beta1: the server is currently unable to handle the request E0510 10:41:01.324046 1353 memcache.go:121] couldn't get resource list for metrics.k8s.io/v1beta1: the server is currently unable to handle the request E0510 10:41:01.436250 1353 memcache.go:121] couldn't get resource list for metrics.k8s.io/v1beta1: the server is currently unable to handle the request E0510 10:41:01.552525 1353 memcache.go:121] couldn't get resource list for metrics.k8s.io/v1beta1: the server is currently unable to handle the request E0510 10:41:01.661164 1353 memcache.go:121] couldn't get resource list for metrics.k8s.io/v1beta1: the server is currently unable to handle the request error: the server doesn't have a resource type "perconaservermongodbrestores" + : + kubectl_bin wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.e725fmrUkk ++ mktemp + local LAST_ERR=/tmp/tmp.GkEvJwJCpv + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.e725fmrUkk + cat /tmp/tmp.GkEvJwJCpv + rm /tmp/tmp.e725fmrUkk /tmp/tmp.GkEvJwJCpv + return 0 + for crd_name in '$(yq eval '\''.metadata.name'\'' "${src_dir}/deploy/crd.yaml" | grep -v '\''\-\-\-'\'')' + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbs.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' + kubectl get perconaservermongodbs.psmdb.percona.com --all-namespaces -o wide E0510 10:41:03.333068 1748 memcache.go:287] couldn't get resource list for metrics.k8s.io/v1beta1: the server is currently unable to handle the request E0510 10:41:03.662308 1748 memcache.go:121] couldn't get resource list for metrics.k8s.io/v1beta1: the server is currently unable to handle the request E0510 10:41:03.778046 1748 memcache.go:121] couldn't get resource list for metrics.k8s.io/v1beta1: the server is currently unable to handle the request E0510 10:41:03.889610 1748 memcache.go:121] couldn't get resource list for metrics.k8s.io/v1beta1: the server is currently unable to handle the request E0510 10:41:04.002782 1748 memcache.go:121] couldn't get resource list for metrics.k8s.io/v1beta1: the server is currently unable to handle the request error: the server doesn't have a resource type "perconaservermongodbs" + kubectl patch perconaservermongodbs.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' E0510 10:41:05.331831 1931 memcache.go:287] couldn't get resource list for metrics.k8s.io/v1beta1: the server is 
currently unable to handle the request E0510 10:41:05.577422 1931 memcache.go:121] couldn't get resource list for metrics.k8s.io/v1beta1: the server is currently unable to handle the request E0510 10:41:05.687695 1931 memcache.go:121] couldn't get resource list for metrics.k8s.io/v1beta1: the server is currently unable to handle the request E0510 10:41:05.795639 1931 memcache.go:121] couldn't get resource list for metrics.k8s.io/v1beta1: the server is currently unable to handle the request E0510 10:41:05.903880 1931 memcache.go:121] couldn't get resource list for metrics.k8s.io/v1beta1: the server is currently unable to handle the request error: the server doesn't have a resource type "perconaservermongodbs" + : + kubectl_bin wait --for=delete crd perconaservermongodbs.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.VFSVHybbAJ ++ mktemp + local LAST_ERR=/tmp/tmp.oWJOkyr4Pf + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=delete crd perconaservermongodbs.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.VFSVHybbAJ + cat /tmp/tmp.oWJOkyr4Pf + rm /tmp/tmp.VFSVHybbAJ /tmp/tmp.oWJOkyr4Pf + return 0 + local rbac_yaml=rbac.yaml + '[' -n psmdb-operator ']' + rbac_yaml=cw-rbac.yaml + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1393/deploy/cw-rbac.yaml --ignore-not-found ++ mktemp + local LAST_OUT=/tmp/tmp.nKTEA5qeGf ++ mktemp + local LAST_ERR=/tmp/tmp.SLWVzWwiuR + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1393/deploy/cw-rbac.yaml --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.nKTEA5qeGf clusterrole.rbac.authorization.k8s.io "percona-server-mongodb-operator" deleted clusterrolebinding.rbac.authorization.k8s.io "service-account-percona-server-mongodb-operator" deleted + cat /tmp/tmp.SLWVzWwiuR + rm /tmp/tmp.nKTEA5qeGf /tmp/tmp.SLWVzWwiuR + return 0 + check_crd_for_deletion PR-1393-7b414d13 + local git_tag=PR-1393-7b414d13 ++ curl -s https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/PR-1393-7b414d13/deploy/crd.yaml ++ yq eval .metadata.name ++ /usr/bin/sed ':a;N;$!ba;s/\n/ /g' ++ /usr/bin/sed s/---//g + for crd_name in '$(curl -s https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/${git_tag}/deploy/crd.yaml | yq eval '\''.metadata.name'\'' | $sed '\''s/---//g'\'' | $sed '\'':a;N;$!ba;s/\n/ /g'\'')' ++ kubectl_bin get crd/null -o 'jsonpath={.status.conditions[-1].type}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.i1v1zkUshi +++ mktemp ++ local LAST_ERR=/tmp/tmp.eGtXrebyu6 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}' ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.i1v1zkUshi ++ cat /tmp/tmp.eGtXrebyu6 Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ sleep 0 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}' ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.i1v1zkUshi ++ cat /tmp/tmp.eGtXrebyu6 Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ sleep 4 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}' ++ exit_status=1 ++ set -e ++ '[' 1 
'!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.i1v1zkUshi ++ cat /tmp/tmp.eGtXrebyu6 Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ sleep 8 ++ cat /tmp/tmp.i1v1zkUshi ++ cat /tmp/tmp.eGtXrebyu6 Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ rm /tmp/tmp.i1v1zkUshi /tmp/tmp.eGtXrebyu6 ++ return 1 + [[ '' == \T\e\r\m\i\n\a\t\i\n\g ]] + '[' -n psmdb-operator ']' + create_namespace psmdb-operator + local namespace=psmdb-operator + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + destroy_chaos_mesh ++ helm list --all-namespaces --filter chaos-mesh ++ tail -n1 ++ sed s/NAMESPACE// ++ awk '-F ' '{print $2}' + local chaos_mesh_ns= + desc 'destroy chaos-mesh' + set +o xtrace ----------------------------------------------------------------------------------- destroy chaos-mesh ----------------------------------------------------------------------------------- + '[' -n '' ']' ++ kubectl get MutatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete MutatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ awk '{print $1}' ++ grep chaos-mesh + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ grep validate-auth ++ kubectl get ValidatingWebhookConfiguration ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ awk '{print $1}' ++ grep chaos-mesh ++ kubectl api-resources E0510 10:41:29.334309 5077 memcache.go:287] couldn't get resource list for metrics.k8s.io/v1beta1: the server is currently unable to handle the request E0510 10:41:29.660618 5077 memcache.go:121] couldn't get resource list for metrics.k8s.io/v1beta1: the server is currently unable to handle the request error: unable to retrieve the complete list of server APIs: metrics.k8s.io/v1beta1: the server is currently unable to handle the request ++ awk '{print $1}' ++ grep chaos-mesh.org ++ kubectl get crd + timeout 30 kubectl delete crd error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrolebinding ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrolebinding error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrole ++ awk '{print $1}' ++ grep chaos-mesh + timeout 30 kubectl delete clusterrole error: resource(s) were provided, but no name was specified + : + desc 'cleaned up all old namespaces' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up all old namespaces ----------------------------------------------------------------------------------- + kubectl_bin get ns + awk '{print$1}' + '[' -n '' ']' + desc 'cleaned up old namespaces psmdb-operator' + set +o xtrace ++ mktemp ----------------------------------------------------------------------------------- cleaned up old namespaces psmdb-operator ----------------------------------------------------------------------------------- + kubectl_bin delete namespace psmdb-operator --ignore-not-found + egrep -v '^kube-|^default|Terminating|psmdb-operator|openshift|gke-mcs|^NAME' + xargs kubectl delete ns ++ mktemp + local LAST_OUT=/tmp/tmp.Syg8SoLz0y + local LAST_OUT=/tmp/tmp.BE4M97HpZ5 ++ mktemp + local LAST_ERR=/tmp/tmp.DkbATbRHG6 + local exit_status=0 + local timeout=4 ++ 
mktemp ++ seq 0 2 + local LAST_ERR=/tmp/tmp.w1TfsTBoHt + local exit_status=0 + local timeout=4 + for i in '$(seq 0 2)' + set +e + kubectl get ns ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete namespace psmdb-operator --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Syg8SoLz0y + cat /tmp/tmp.DkbATbRHG6 + rm /tmp/tmp.Syg8SoLz0y /tmp/tmp.DkbATbRHG6 + return 0 namespace "demand-backup-sharded-29843" deleted + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.BE4M97HpZ5 namespace "psmdb-operator" deleted + cat /tmp/tmp.w1TfsTBoHt + rm /tmp/tmp.BE4M97HpZ5 /tmp/tmp.w1TfsTBoHt + return 0 + kubectl_bin wait --for=delete namespace psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.zfn0Hj99oK ++ mktemp + local LAST_ERR=/tmp/tmp.GjW9dHdlSG + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=delete namespace psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.zfn0Hj99oK + cat /tmp/tmp.GjW9dHdlSG + rm /tmp/tmp.zfn0Hj99oK /tmp/tmp.GjW9dHdlSG + return 0 + desc 'create namespace psmdb-operator' + set +o xtrace ----------------------------------------------------------------------------------- create namespace psmdb-operator ----------------------------------------------------------------------------------- + kubectl_bin create namespace psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.Aseo3g8gzI ++ mktemp + local LAST_ERR=/tmp/tmp.q8SmoBrxvf + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl create namespace psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Aseo3g8gzI namespace/psmdb-operator created + cat /tmp/tmp.q8SmoBrxvf + rm /tmp/tmp.Aseo3g8gzI /tmp/tmp.q8SmoBrxvf + return 0 ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.3UYWpYhnan +++ mktemp ++ local LAST_ERR=/tmp/tmp.inBxGyWu5I ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.3UYWpYhnan ++ cat /tmp/tmp.inBxGyWu5I ++ rm /tmp/tmp.3UYWpYhnan /tmp/tmp.inBxGyWu5I ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-1393-7b414d13-23-cluster2 --namespace=psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.Lh8WkQrJYY ++ mktemp + local LAST_ERR=/tmp/tmp.ZIOOYbjdLz + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-1393-7b414d13-23-cluster2 --namespace=psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Lh8WkQrJYY Context "gke_cloud-dev-112233_us-central1-a_jen-psmdb-1393-7b414d13-23-cluster2" modified. 
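Every kubectl call in this trace goes through the suite's kubectl_bin wrapper, which is why each step is surrounded by the same mktemp / seq 0 2 / set +e boilerplate: output and errors are captured to temp files and the command is retried up to three times with a growing pause. A reconstruction of that wrapper as it behaves in this log (a sketch only; the real helper may differ in detail):

# reconstructed from the trace above; exact error handling of the real kubectl_bin may differ
kubectl_bin() {
    local LAST_OUT LAST_ERR
    LAST_OUT="$(mktemp)"
    LAST_ERR="$(mktemp)"
    local exit_status=0
    local timeout=4
    for i in $(seq 0 2); do
        set +e
        kubectl "$@" >"$LAST_OUT" 2>"$LAST_ERR"
        exit_status=$?
        set -e
        cat "$LAST_OUT"
        cat "$LAST_ERR"
        [ "$exit_status" -eq 0 ] && break
        sleep $((timeout * i))      # observed back-off in this log: 0s, 4s, 8s
    done
    rm "$LAST_OUT" "$LAST_ERR"
    return $exit_status
}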
+ cat /tmp/tmp.ZIOOYbjdLz + rm /tmp/tmp.Lh8WkQrJYY /tmp/tmp.ZIOOYbjdLz + return 0 + deploy_operator + desc 'start PSMDB operator' + set +o xtrace ----------------------------------------------------------------------------------- start PSMDB operator ----------------------------------------------------------------------------------- + local cr_file + '[' -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1393/e2e-tests/demand-backup-sharded/conf/crd.yaml ']' + cr_file=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1393/deploy/crd.yaml + kubectl_bin apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1393/deploy/crd.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.qFIr4E5FYB ++ mktemp + local LAST_ERR=/tmp/tmp.YBRoSrbfUu + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1393/deploy/crd.yaml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.qFIr4E5FYB customresourcedefinition.apiextensions.k8s.io/perconaservermongodbbackups.psmdb.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaservermongodbrestores.psmdb.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaservermongodbs.psmdb.percona.com serverside-applied + cat /tmp/tmp.YBRoSrbfUu + rm /tmp/tmp.qFIr4E5FYB /tmp/tmp.YBRoSrbfUu + return 0 + '[' -n psmdb-operator ']' + apply_rbac cw-rbac + local operator_namespace=psmdb-operator + local rbac=cw-rbac + sed -e 's^namespace: .*^namespace: psmdb-operator^' + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1393/deploy/cw-rbac.yaml + kubectl_bin apply -n psmdb-operator -f - ++ mktemp + local LAST_OUT=/tmp/tmp.rc3GuwrO0E ++ mktemp + local LAST_ERR=/tmp/tmp.fbRh2AcuOf + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -n psmdb-operator -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.rc3GuwrO0E clusterrole.rbac.authorization.k8s.io/percona-server-mongodb-operator created serviceaccount/percona-server-mongodb-operator created clusterrolebinding.rbac.authorization.k8s.io/service-account-percona-server-mongodb-operator created + cat /tmp/tmp.fbRh2AcuOf + rm /tmp/tmp.rc3GuwrO0E /tmp/tmp.fbRh2AcuOf + return 0 + yq eval ' (.spec.template.spec.containers[].image = "perconalab/percona-server-mongodb-operator:PR-1393-7b414d13") | ((.. | select(.[] == "DISABLE_TELEMETRY")) |= .value="true") | ((.. 
| select(.[] == "LOG_LEVEL")) |= .value="DEBUG")' /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1393/deploy/cw-operator.yaml + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.uiDAtkzFtC ++ mktemp + local LAST_ERR=/tmp/tmp.abaDsgKefA + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.uiDAtkzFtC deployment.apps/percona-server-mongodb-operator created + cat /tmp/tmp.abaDsgKefA + rm /tmp/tmp.uiDAtkzFtC /tmp/tmp.abaDsgKefA + return 0 + sleep 2 ++ get_operator_pod ++ kubectl_bin get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.hyWpF23okU +++ mktemp ++ local LAST_ERR=/tmp/tmp.L8zJ0VJ1A2 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.hyWpF23okU ++ cat /tmp/tmp.L8zJ0VJ1A2 ++ rm /tmp/tmp.hyWpF23okU /tmp/tmp.L8zJ0VJ1A2 ++ return 0 + wait_pod percona-server-mongodb-operator-f94797cf7-cwhwv + local pod=percona-server-mongodb-operator-f94797cf7-cwhwv + set +o xtrace waiting for pod/percona-server-mongodb-operator-f94797cf7-cwhwv to be ready...OK + create_namespace demand-backup-sharded-18059 + local namespace=demand-backup-sharded-18059 + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + destroy_chaos_mesh ++ helm list --all-namespaces --filter chaos-mesh ++ tail -n1 ++ awk '-F ' '{print $2}' ++ sed s/NAMESPACE// + local chaos_mesh_ns= + desc 'destroy chaos-mesh' + set +o xtrace ----------------------------------------------------------------------------------- destroy chaos-mesh ----------------------------------------------------------------------------------- + '[' -n '' ']' ++ kubectl get MutatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete MutatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep validate-auth ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ grep chaos-mesh ++ kubectl api-resources ++ awk '{print $1}' ++ grep chaos-mesh.org ++ awk '{print $1}' ++ kubectl get crd + timeout 30 kubectl delete crd error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrolebinding ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrolebinding error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrole ++ awk '{print $1}' ++ grep chaos-mesh + timeout 30 kubectl delete clusterrole error: resource(s) were provided, but no name was specified + : + desc 'cleaned up all old namespaces' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up all old namespaces ----------------------------------------------------------------------------------- + '[' -n '' ']' + desc 'cleaned up old namespaces demand-backup-sharded-18059' + set +o xtrace 
----------------------------------------------------------------------------------- cleaned up old namespaces demand-backup-sharded-18059 ----------------------------------------------------------------------------------- + kubectl_bin delete namespace demand-backup-sharded-18059 --ignore-not-found ++ mktemp + xargs kubectl delete ns + egrep -v '^kube-|^default|Terminating|psmdb-operator|openshift|gke-mcs|^NAME' + awk '{print$1}' + kubectl_bin get ns + local LAST_OUT=/tmp/tmp.NjwOX6jGfM ++ mktemp + local LAST_OUT=/tmp/tmp.RutsTpiOwP ++ mktemp ++ mktemp + local LAST_ERR=/tmp/tmp.JkOhDrn6UN + local exit_status=0 + local timeout=4 + local LAST_ERR=/tmp/tmp.sdvqXzHIq2 + local exit_status=0 + local timeout=4 ++ seq 0 2 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get ns + for i in '$(seq 0 2)' + set +e + kubectl delete namespace demand-backup-sharded-18059 --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.RutsTpiOwP + cat /tmp/tmp.sdvqXzHIq2 + rm /tmp/tmp.RutsTpiOwP /tmp/tmp.sdvqXzHIq2 + return 0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.NjwOX6jGfM + cat /tmp/tmp.JkOhDrn6UN + rm /tmp/tmp.NjwOX6jGfM /tmp/tmp.JkOhDrn6UN + return 0 + kubectl_bin wait --for=delete namespace demand-backup-sharded-18059 ++ mktemp + local LAST_OUT=/tmp/tmp.Tp0Y55JfzR ++ mktemp + local LAST_ERR=/tmp/tmp.xVMa4S14Je + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=delete namespace demand-backup-sharded-18059 error: resource(s) were provided, but no name was specified + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Tp0Y55JfzR + cat /tmp/tmp.xVMa4S14Je + rm /tmp/tmp.Tp0Y55JfzR /tmp/tmp.xVMa4S14Je + return 0 + desc 'create namespace demand-backup-sharded-18059' + set +o xtrace ----------------------------------------------------------------------------------- create namespace demand-backup-sharded-18059 ----------------------------------------------------------------------------------- + kubectl_bin create namespace demand-backup-sharded-18059 ++ mktemp + local LAST_OUT=/tmp/tmp.2rZUGhFIjX ++ mktemp + local LAST_ERR=/tmp/tmp.h35lRZ0X0F + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl create namespace demand-backup-sharded-18059 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.2rZUGhFIjX namespace/demand-backup-sharded-18059 created + cat /tmp/tmp.h35lRZ0X0F + rm /tmp/tmp.2rZUGhFIjX /tmp/tmp.h35lRZ0X0F + return 0 ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.4iHNrHINol +++ mktemp ++ local LAST_ERR=/tmp/tmp.Xrv8AgQ4t1 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.4iHNrHINol ++ cat /tmp/tmp.Xrv8AgQ4t1 ++ rm /tmp/tmp.4iHNrHINol /tmp/tmp.Xrv8AgQ4t1 ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-1393-7b414d13-23-cluster2 --namespace=demand-backup-sharded-18059 ++ mktemp + local LAST_OUT=/tmp/tmp.dtdwTkPn94 ++ mktemp + local LAST_ERR=/tmp/tmp.Lwgsnj630T + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-1393-7b414d13-23-cluster2 --namespace=demand-backup-sharded-18059 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.dtdwTkPn94 Context 
"gke_cloud-dev-112233_us-central1-a_jen-psmdb-1393-7b414d13-23-cluster2" modified. + cat /tmp/tmp.Lwgsnj630T + rm /tmp/tmp.dtdwTkPn94 /tmp/tmp.Lwgsnj630T + return 0 + deploy_minio + desc 'install Minio' + set +o xtrace ----------------------------------------------------------------------------------- install Minio ----------------------------------------------------------------------------------- + helm uninstall minio-service Error: uninstall: Release not loaded: minio-service: release: not found + : + helm repo remove minio "minio" has been removed from your repositories + helm repo add minio https://charts.min.io/ "minio" has been added to your repositories + retry 10 60 helm install minio-service --version 5.0.14 --set replicas=1 --set mode=standalone --set resources.requests.memory=256Mi --set rootUser=rootuser --set rootPassword=rootpass123 --set 'users[0].accessKey=some-access-key' --set 'users[0].secretKey=some-secret-key' --set 'users[0].policy=consoleAdmin' --set service.type=ClusterIP --set configPathmc=/tmp/.minio/ --set persistence.size=2G --set securityContext.enabled=false minio/minio + local max=10 + local delay=60 + shift 2 + local n=1 + helm install minio-service --version 5.0.14 --set replicas=1 --set mode=standalone --set resources.requests.memory=256Mi --set rootUser=rootuser --set rootPassword=rootpass123 --set 'users[0].accessKey=some-access-key' --set 'users[0].secretKey=some-secret-key' --set 'users[0].policy=consoleAdmin' --set service.type=ClusterIP --set configPathmc=/tmp/.minio/ --set persistence.size=2G --set securityContext.enabled=false minio/minio NAME: minio-service LAST DEPLOYED: Fri May 10 10:42:25 2024 NAMESPACE: demand-backup-sharded-18059 STATUS: deployed REVISION: 1 TEST SUITE: None NOTES: MinIO can be accessed via port 9000 on the following DNS name from within your cluster: minio-service.demand-backup-sharded-18059.svc.cluster.local To access MinIO from localhost, run the below commands: 1. export POD_NAME=$(kubectl get pods --namespace demand-backup-sharded-18059 -l "release=minio-service" -o jsonpath="{.items[0].metadata.name}") 2. kubectl port-forward $POD_NAME 9000 --namespace demand-backup-sharded-18059 Read more about port forwarding here: http://kubernetes.io/docs/user-guide/kubectl/kubectl_port-forward/ You can now access MinIO server on http://localhost:9000. Follow the below steps to connect to MinIO server with mc client: 1. Download the MinIO mc client - https://min.io/docs/minio/linux/reference/minio-mc.html#quickstart 2. export MC_HOST_minio-service-local=http://$(kubectl get secret --namespace demand-backup-sharded-18059 minio-service -o jsonpath="{.data.rootUser}" | base64 --decode):$(kubectl get secret --namespace demand-backup-sharded-18059 minio-service -o jsonpath="{.data.rootPassword}" | base64 --decode)@localhost:9000 3. 
mc ls minio-service-local ++ kubectl_bin get pods --selector=release=minio-service -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.0nM0rAlQxv +++ mktemp ++ local LAST_ERR=/tmp/tmp.8a0IGhk7og ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=release=minio-service -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.0nM0rAlQxv ++ cat /tmp/tmp.8a0IGhk7og ++ rm /tmp/tmp.0nM0rAlQxv /tmp/tmp.8a0IGhk7og ++ return 0 + MINIO_POD=minio-service-57dd49b-t67fl + wait_pod minio-service-57dd49b-t67fl + local pod=minio-service-57dd49b-t67fl + set +o xtrace waiting for pod/minio-service-57dd49b-t67fl to be ready.OK + '[' -n psmdb-operator ']' + kubectl_bin create svc -n psmdb-operator externalname minio-service --external-name=minio-service.demand-backup-sharded-18059.svc.cluster.local --tcp=9000 ++ mktemp + local LAST_OUT=/tmp/tmp.ANyLzmRSoz ++ mktemp + local LAST_ERR=/tmp/tmp.qrRL6MpiQf + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl create svc -n psmdb-operator externalname minio-service --external-name=minio-service.demand-backup-sharded-18059.svc.cluster.local --tcp=9000 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.ANyLzmRSoz service/minio-service created + cat /tmp/tmp.qrRL6MpiQf + rm /tmp/tmp.ANyLzmRSoz /tmp/tmp.qrRL6MpiQf + return 0 + kubectl_bin run -i --rm aws-cli --image=perconalab/awscli --restart=Never -- bash -c 'AWS_ACCESS_KEY_ID=some-access-key AWS_SECRET_ACCESS_KEY=some-secret-key AWS_DEFAULT_REGION=us-east-1 \ /usr/bin/aws --endpoint-url http://minio-service:9000 s3 mb s3://operator-testing' ++ mktemp + local LAST_OUT=/tmp/tmp.ylbaJ0kxVG ++ mktemp + local LAST_ERR=/tmp/tmp.KfYJAOAwxC + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl run -i --rm aws-cli --image=perconalab/awscli --restart=Never -- bash -c 'AWS_ACCESS_KEY_ID=some-access-key AWS_SECRET_ACCESS_KEY=some-secret-key AWS_DEFAULT_REGION=us-east-1 \ /usr/bin/aws --endpoint-url http://minio-service:9000 s3 mb s3://operator-testing' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.ylbaJ0kxVG make_bucket: operator-testing pod "aws-cli" deleted + cat /tmp/tmp.KfYJAOAwxC If you don't see a command prompt, try pressing enter. 
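MinIO stands in for S3 in this test: the chart is installed with static test credentials, an ExternalName service makes it reachable from the operator namespace, and a throwaway awscli pod creates the operator-testing bucket that the backups will target. The "couldn't attach" warning that follows is harmless; kubectl simply falls back to streaming the short-lived pod's logs. The bucket-creation call, condensed from the trace:

# one-off pod pointing the aws CLI at the in-cluster MinIO endpoint; keys and bucket
# name are the fixed test values used by this run
kubectl run -i --rm aws-cli --image=perconalab/awscli --restart=Never -- bash -c \
  'AWS_ACCESS_KEY_ID=some-access-key AWS_SECRET_ACCESS_KEY=some-secret-key \
   AWS_DEFAULT_REGION=us-east-1 \
   /usr/bin/aws --endpoint-url http://minio-service:9000 s3 mb s3://operator-testing'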
warning: couldn't attach to pod/aws-cli, falling back to streaming logs: unable to upgrade connection: container aws-cli not found in pod aws-cli_demand-backup-sharded-18059 + rm /tmp/tmp.ylbaJ0kxVG /tmp/tmp.KfYJAOAwxC + return 0 + desc 'create first PSMDB cluster' + set +o xtrace ----------------------------------------------------------------------------------- create first PSMDB cluster ----------------------------------------------------------------------------------- + cluster=some-name + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1393/e2e-tests/conf/secrets.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1393/e2e-tests/conf/client.yml ++ mktemp + local LAST_OUT=/tmp/tmp.92skK6UDei ++ mktemp + local LAST_ERR=/tmp/tmp.Sh1XyJUzDJ + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1393/e2e-tests/conf/secrets.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1393/e2e-tests/conf/client.yml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.92skK6UDei secret/some-users created deployment.apps/psmdb-client created + cat /tmp/tmp.Sh1XyJUzDJ + rm /tmp/tmp.92skK6UDei /tmp/tmp.Sh1XyJUzDJ + return 0 + apply_s3_storage_secrets + desc 'create secrets for cloud storages' + set +o xtrace ----------------------------------------------------------------------------------- create secrets for cloud storages ----------------------------------------------------------------------------------- + '[' -z '' ']' + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1393/e2e-tests/conf/minio-secret.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1393/e2e-tests/conf/cloud-secret.yml ++ mktemp + local LAST_OUT=/tmp/tmp.vaW5YAOdLq ++ mktemp + local LAST_ERR=/tmp/tmp.RdyPSEBY95 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1393/e2e-tests/conf/minio-secret.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1393/e2e-tests/conf/cloud-secret.yml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.vaW5YAOdLq secret/minio-secret created secret/aws-s3-secret created secret/gcp-cs-secret created secret/azure-secret created + cat /tmp/tmp.RdyPSEBY95 + rm /tmp/tmp.vaW5YAOdLq /tmp/tmp.RdyPSEBY95 + return 0 + version_gt 1.19 ++ bc -l ++ echo '1.26 >= 1.19' + '[' 1 -eq 1 ']' + return 0 + '[' 0 -ne 1 ']' + /usr/bin/sed s/docker/runc/g + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1393/e2e-tests/conf/container-rc.yaml + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.qpUND0xoUd ++ mktemp + local LAST_ERR=/tmp/tmp.uu7blkJdIZ + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.qpUND0xoUd runtimeclass.node.k8s.io/container-rc unchanged + cat /tmp/tmp.uu7blkJdIZ + rm /tmp/tmp.qpUND0xoUd /tmp/tmp.uu7blkJdIZ + return 0 + apply_cluster /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1393/e2e-tests/demand-backup-sharded/conf/some-name-rs0.yml + '[' -z '' ']' + kubectl_bin apply -f - + cat_config /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1393/e2e-tests/demand-backup-sharded/conf/some-name-rs0.yml ++ mktemp + yq eval '(.spec | select(.image == null)).image = "perconalab/percona-server-mongodb-operator:main-mongod7.0"' + local LAST_OUT=/tmp/tmp.yryA3gPOea + yq eval '(.spec | 
select(has("pmm"))).pmm.image = "perconalab/pmm-client:dev-latest"' + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1393/e2e-tests/demand-backup-sharded/conf/some-name-rs0.yml ++ mktemp + yq eval '(.spec | select(has("initImage"))).initImage = "perconalab/percona-server-mongodb-operator:PR-1393-7b414d13"' + yq eval '(.spec | select(has("backup"))).backup.image = "perconalab/percona-server-mongodb-operator:main-backup"' + local LAST_ERR=/tmp/tmp.g6p4QHKHa0 + local exit_status=0 + local timeout=4 + yq eval '.spec.upgradeOptions.apply="Never"' ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.yryA3gPOea perconaservermongodb.psmdb.percona.com/some-name created + cat /tmp/tmp.g6p4QHKHa0 + rm /tmp/tmp.yryA3gPOea /tmp/tmp.g6p4QHKHa0 + return 0 + desc 'check if all 3 Pods started' + set +o xtrace ----------------------------------------------------------------------------------- check if all 3 Pods started ----------------------------------------------------------------------------------- + wait_for_running some-name-rs0 3 + local name=some-name-rs0 + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=some-name ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + [[ 0 -eq 2 ]] + wait_pod some-name-rs0-0 + local pod=some-name-rs0-0 + set +o xtrace waiting for pod/some-name-rs0-0 to be ready.....OK + for i in '$(seq 0 $last_pod)' + [[ 1 -eq 2 ]] + wait_pod some-name-rs0-1 + local pod=some-name-rs0-1 + set +o xtrace waiting for pod/some-name-rs0-1 to be ready.......OK + for i in '$(seq 0 $last_pod)' + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.osfFNBZf3D +++ mktemp ++ local LAST_ERR=/tmp/tmp.PAASPjpBWI ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.osfFNBZf3D ++ cat /tmp/tmp.PAASPjpBWI ++ rm /tmp/tmp.osfFNBZf3D /tmp/tmp.PAASPjpBWI ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-rs0-2 + local pod=some-name-rs0-2 + set +o xtrace waiting for pod/some-name-rs0-2 to be ready........OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.qi4C1wemt2 +++ mktemp ++ local LAST_ERR=/tmp/tmp.JMhtUFEtFR ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.qi4C1wemt2 ++ cat /tmp/tmp.JMhtUFEtFR ++ rm /tmp/tmp.qi4C1wemt2 /tmp/tmp.JMhtUFEtFR ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness.......................................... 
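wait_for_running checks the three rs0 pods one by one, skips the arbiter and non-voting checks because neither is enabled in this cluster spec, and then polls until the operator reports the whole cluster ready; that final poll runs with xtrace switched off, so only the dots are visible here. A plausible sketch of what those dots correspond to (the .status.state field and the "ready" value are assumptions, not shown in this trace):

# assumed readiness poll; the actual helper's condition is hidden behind 'set +x' above
until [ "$(kubectl get psmdb some-name -o jsonpath='{.status.state}')" = "ready" ]; do
    echo -n .
    sleep 1
done
echo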
+ wait_for_running some-name-cfg 3 false + local name=some-name-cfg + let last_pod=2 + local check_cluster_readyness=false + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=cfg + local cluster_name=some-name ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + [[ 0 -eq 2 ]] + wait_pod some-name-cfg-0 + local pod=some-name-cfg-0 + set +o xtrace waiting for pod/some-name-cfg-0 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 1 -eq 2 ]] + wait_pod some-name-cfg-1 + local pod=some-name-cfg-1 + set +o xtrace waiting for pod/some-name-cfg-1 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.bUSIWR909M +++ mktemp ++ local LAST_ERR=/tmp/tmp.qFGUEO9biM ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.bUSIWR909M ++ cat /tmp/tmp.qFGUEO9biM ++ rm /tmp/tmp.bUSIWR909M /tmp/tmp.qFGUEO9biM ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-cfg-2 + local pod=some-name-cfg-2 + set +o xtrace waiting for pod/some-name-cfg-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].non_voting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.D1d8ocOmGc +++ mktemp ++ local LAST_ERR=/tmp/tmp.04TCsEfxXE ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].non_voting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.D1d8ocOmGc ++ cat /tmp/tmp.04TCsEfxXE ++ rm /tmp/tmp.D1d8ocOmGc /tmp/tmp.04TCsEfxXE ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ false == \t\r\u\e ]] + wait_for_running some-name-mongos 3 + local name=some-name-mongos + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=mongos + local cluster_name=some-name ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + [[ 0 -eq 2 ]] + wait_pod some-name-mongos-0 + local pod=some-name-mongos-0 + set +o xtrace waiting for pod/some-name-mongos-0 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 1 -eq 2 ]] + wait_pod some-name-mongos-1 + local pod=some-name-mongos-1 + set +o xtrace waiting for pod/some-name-mongos-1 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.XIaRk0Wat4 +++ mktemp ++ local LAST_ERR=/tmp/tmp.duujGjwWar ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.XIaRk0Wat4 ++ cat /tmp/tmp.duujGjwWar ++ rm /tmp/tmp.XIaRk0Wat4 /tmp/tmp.duujGjwWar ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-mongos-2 + local pod=some-name-mongos-2 + set +o xtrace waiting for pod/some-name-mongos-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].non_voting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.QThAD5N7PV +++ mktemp ++ local LAST_ERR=/tmp/tmp.HAoSzyAk36 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 
2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].non_voting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.QThAD5N7PV ++ cat /tmp/tmp.HAoSzyAk36 ++ rm /tmp/tmp.QThAD5N7PV /tmp/tmp.HAoSzyAk36 ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness + sleep 10 + desc 'check if service and statefulset created with expected config' + set +o xtrace ----------------------------------------------------------------------------------- check if service and statefulset created with expected config ----------------------------------------------------------------------------------- + compare_kubectl statefulset/some-name-rs0 + local resource=statefulset/some-name-rs0 + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1393/e2e-tests/demand-backup-sharded/compare/statefulset_some-name-rs0.yml + local new_result=/tmp/tmp.nvt7xvr20D/statefulset_some-name-rs0.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1393/e2e-tests/demand-backup-sharded/compare/statefulset_some-name-rs0-oc.yml ']' + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("demand-backup-sharded-18059", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. 
== "batch/v1beta1")) = "batch/v1" ' - + kubectl_bin get -o yaml statefulset/some-name-rs0 ++ mktemp + local LAST_OUT=/tmp/tmp.BAItXdqXPV ++ mktemp + local LAST_ERR=/tmp/tmp.66jDneoSN2 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get -o yaml statefulset/some-name-rs0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.BAItXdqXPV + cat /tmp/tmp.66jDneoSN2 + rm /tmp/tmp.BAItXdqXPV /tmp/tmp.66jDneoSN2 + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.nvt7xvr20D/statefulset_some-name-rs0.yml + version_gt 1.22 ++ echo '1.26 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.nvt7xvr20D/statefulset_some-name-rs0.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.nvt7xvr20D/statefulset_some-name-rs0.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1393/e2e-tests/demand-backup-sharded/compare/statefulset_some-name-rs0.yml == */cronjob* ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1393/e2e-tests/demand-backup-sharded/compare/statefulset_some-name-rs0.yml /tmp/tmp.nvt7xvr20D/statefulset_some-name-rs0.yml + compare_kubectl statefulset/some-name-rs1 + local resource=statefulset/some-name-rs1 + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1393/e2e-tests/demand-backup-sharded/compare/statefulset_some-name-rs1.yml + local new_result=/tmp/tmp.nvt7xvr20D/statefulset_some-name-rs1.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1393/e2e-tests/demand-backup-sharded/compare/statefulset_some-name-rs1-oc.yml ']' + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. 
| select(tag == "!!str")) |= sub("demand-backup-sharded-18059", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. == "batch/v1beta1")) = "batch/v1" ' - + kubectl_bin get -o yaml statefulset/some-name-rs1 ++ mktemp + local LAST_OUT=/tmp/tmp.WAVNTZYy3k ++ mktemp + local LAST_ERR=/tmp/tmp.ihpA2Gyp8G + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get -o yaml statefulset/some-name-rs1 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.WAVNTZYy3k + cat /tmp/tmp.ihpA2Gyp8G + rm /tmp/tmp.WAVNTZYy3k /tmp/tmp.ihpA2Gyp8G + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.nvt7xvr20D/statefulset_some-name-rs1.yml + version_gt 1.22 ++ bc -l ++ echo '1.26 >= 1.22' + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.nvt7xvr20D/statefulset_some-name-rs1.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.nvt7xvr20D/statefulset_some-name-rs1.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1393/e2e-tests/demand-backup-sharded/compare/statefulset_some-name-rs1.yml == */cronjob* ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1393/e2e-tests/demand-backup-sharded/compare/statefulset_some-name-rs1.yml /tmp/tmp.nvt7xvr20D/statefulset_some-name-rs1.yml + compare_kubectl statefulset/some-name-rs2 + local resource=statefulset/some-name-rs2 + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1393/e2e-tests/demand-backup-sharded/compare/statefulset_some-name-rs2.yml + local new_result=/tmp/tmp.nvt7xvr20D/statefulset_some-name-rs2.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1393/e2e-tests/demand-backup-sharded/compare/statefulset_some-name-rs2-oc.yml ']' + kubectl_bin get -o yaml statefulset/some-name-rs2 ++ mktemp + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. 
| select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("demand-backup-sharded-18059", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. == "batch/v1beta1")) = "batch/v1" ' - + local LAST_OUT=/tmp/tmp.ay1xgHlf4B ++ mktemp + local LAST_ERR=/tmp/tmp.pwcqg99qSO + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get -o yaml statefulset/some-name-rs2 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.ay1xgHlf4B + cat /tmp/tmp.pwcqg99qSO + rm /tmp/tmp.ay1xgHlf4B /tmp/tmp.pwcqg99qSO + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.nvt7xvr20D/statefulset_some-name-rs2.yml + version_gt 1.22 ++ bc -l ++ echo '1.26 >= 1.22' + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.nvt7xvr20D/statefulset_some-name-rs2.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.nvt7xvr20D/statefulset_some-name-rs2.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1393/e2e-tests/demand-backup-sharded/compare/statefulset_some-name-rs2.yml == */cronjob* ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1393/e2e-tests/demand-backup-sharded/compare/statefulset_some-name-rs2.yml /tmp/tmp.nvt7xvr20D/statefulset_some-name-rs2.yml + compare_kubectl statefulset/some-name-cfg + local resource=statefulset/some-name-cfg + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1393/e2e-tests/demand-backup-sharded/compare/statefulset_some-name-cfg.yml + local new_result=/tmp/tmp.nvt7xvr20D/statefulset_some-name-cfg.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1393/e2e-tests/demand-backup-sharded/compare/statefulset_some-name-cfg-oc.yml ']' + kubectl_bin get -o yaml statefulset/some-name-cfg + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. 
| select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("demand-backup-sharded-18059", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. == "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.BYeQoQUKyd ++ mktemp + local LAST_ERR=/tmp/tmp.s6Up2zTFN4 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get -o yaml statefulset/some-name-cfg + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.BYeQoQUKyd + cat /tmp/tmp.s6Up2zTFN4 + rm /tmp/tmp.BYeQoQUKyd /tmp/tmp.s6Up2zTFN4 + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.nvt7xvr20D/statefulset_some-name-cfg.yml + version_gt 1.22 ++ bc -l ++ echo '1.26 >= 1.22' + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.nvt7xvr20D/statefulset_some-name-cfg.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.nvt7xvr20D/statefulset_some-name-cfg.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1393/e2e-tests/demand-backup-sharded/compare/statefulset_some-name-cfg.yml == */cronjob* ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1393/e2e-tests/demand-backup-sharded/compare/statefulset_some-name-cfg.yml /tmp/tmp.nvt7xvr20D/statefulset_some-name-cfg.yml + compare_kubectl statefulset/some-name-mongos '' + local resource=statefulset/some-name-mongos + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1393/e2e-tests/demand-backup-sharded/compare/statefulset_some-name-mongos.yml + local new_result=/tmp/tmp.nvt7xvr20D/statefulset_some-name-mongos.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1393/e2e-tests/demand-backup-sharded/compare/statefulset_some-name-mongos-oc.yml ']' + kubectl_bin get -o yaml statefulset/some-name-mongos ++ mktemp + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. 
| select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("demand-backup-sharded-18059", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. == "batch/v1beta1")) = "batch/v1" ' - + local LAST_OUT=/tmp/tmp.kFz05OIvKj ++ mktemp + local LAST_ERR=/tmp/tmp.HYYWVpU0hu + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get -o yaml statefulset/some-name-mongos + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.kFz05OIvKj + cat /tmp/tmp.HYYWVpU0hu + rm /tmp/tmp.kFz05OIvKj /tmp/tmp.HYYWVpU0hu + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.nvt7xvr20D/statefulset_some-name-mongos.yml + version_gt 1.22 ++ echo '1.26 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.nvt7xvr20D/statefulset_some-name-mongos.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.nvt7xvr20D/statefulset_some-name-mongos.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1393/e2e-tests/demand-backup-sharded/compare/statefulset_some-name-mongos.yml == */cronjob* ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1393/e2e-tests/demand-backup-sharded/compare/statefulset_some-name-mongos.yml /tmp/tmp.nvt7xvr20D/statefulset_some-name-mongos.yml + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1393/e2e-tests/demand-backup-sharded/conf/mongos-secret.yml ++ mktemp + local LAST_OUT=/tmp/tmp.iVZIw5y0RA ++ mktemp + local LAST_ERR=/tmp/tmp.hkGDcwmpPt + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1393/e2e-tests/demand-backup-sharded/conf/mongos-secret.yml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.iVZIw5y0RA secret/some-name-mongos created + cat /tmp/tmp.hkGDcwmpPt + rm /tmp/tmp.iVZIw5y0RA /tmp/tmp.hkGDcwmpPt + return 0 + sleep 10 + wait_for_running some-name-mongos 3 + local name=some-name-mongos + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=mongos + local cluster_name=some-name ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + [[ 0 -eq 2 ]] + wait_pod some-name-mongos-0 + local pod=some-name-mongos-0 + set +o xtrace waiting for pod/some-name-mongos-0 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 1 -eq 2 ]] + wait_pod some-name-mongos-1 + local pod=some-name-mongos-1 + set +o xtrace waiting for 
pod/some-name-mongos-1 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.oaOt4eoj7k +++ mktemp ++ local LAST_ERR=/tmp/tmp.Dr0aho672U ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.oaOt4eoj7k ++ cat /tmp/tmp.Dr0aho672U ++ rm /tmp/tmp.oaOt4eoj7k /tmp/tmp.Dr0aho672U ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-mongos-2 + local pod=some-name-mongos-2 + set +o xtrace waiting for pod/some-name-mongos-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].non_voting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.7IDky05wdt +++ mktemp ++ local LAST_ERR=/tmp/tmp.2gsrVSmGJb ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].non_voting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.7IDky05wdt ++ cat /tmp/tmp.2gsrVSmGJb ++ rm /tmp/tmp.7IDky05wdt /tmp/tmp.2gsrVSmGJb ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness........................................................................................... + compare_kubectl statefulset/some-name-mongos -secret + local resource=statefulset/some-name-mongos + local postfix=-secret + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1393/e2e-tests/demand-backup-sharded/compare/statefulset_some-name-mongos-secret.yml + local new_result=/tmp/tmp.nvt7xvr20D/statefulset_some-name-mongos.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1393/e2e-tests/demand-backup-sharded/compare/statefulset_some-name-mongos-secret-oc.yml ']' + kubectl_bin get -o yaml statefulset/some-name-mongos + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. 
| select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("demand-backup-sharded-18059", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. == "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.y4UOb3e6VT ++ mktemp + local LAST_ERR=/tmp/tmp.JwQuvmM3QY + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get -o yaml statefulset/some-name-mongos + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.y4UOb3e6VT + cat /tmp/tmp.JwQuvmM3QY + rm /tmp/tmp.y4UOb3e6VT /tmp/tmp.JwQuvmM3QY + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.nvt7xvr20D/statefulset_some-name-mongos.yml + version_gt 1.22 ++ echo '1.26 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.nvt7xvr20D/statefulset_some-name-mongos.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.nvt7xvr20D/statefulset_some-name-mongos.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1393/e2e-tests/demand-backup-sharded/compare/statefulset_some-name-mongos-secret.yml == */cronjob* ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1393/e2e-tests/demand-backup-sharded/compare/statefulset_some-name-mongos-secret.yml /tmp/tmp.nvt7xvr20D/statefulset_some-name-mongos.yml + desc 'write data, read from all' + set +o xtrace ----------------------------------------------------------------------------------- write data, read from all ----------------------------------------------------------------------------------- + run_mongos 'db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"},{db:"myApp1",role:"readWrite"},{db:"myApp2",role:"readWrite"}]})' userAdmin:userAdmin123456@some-name-mongos.demand-backup-sharded-18059 + local 'command=db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"},{db:"myApp1",role:"readWrite"},{db:"myApp2",role:"readWrite"}]})' + local uri=userAdmin:userAdmin123456@some-name-mongos.demand-backup-sharded-18059 + local driver=mongodb + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.5T4yr7gFPd +++ mktemp ++ local LAST_ERR=/tmp/tmp.EyFZs8AcGX ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.5T4yr7gFPd ++ cat /tmp/tmp.EyFZs8AcGX ++ rm /tmp/tmp.5T4yr7gFPd /tmp/tmp.EyFZs8AcGX ++ return 0 + local client_container=psmdb-client-7469665986-mlg9m + local mongo_flag= + kubectl_bin exec psmdb-client-7469665986-mlg9m -- bash -c 'printf '\''db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"},{db:"myApp1",role:"readWrite"},{db:"myApp2",role:"readWrite"}]})\n'\'' | 
mongo mongodb://userAdmin:userAdmin123456@some-name-mongos.demand-backup-sharded-18059.svc.cluster.local/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.XtLnWe11sG ++ mktemp + local LAST_ERR=/tmp/tmp.T31nCyEphA + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-7469665986-mlg9m -- bash -c 'printf '\''db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"},{db:"myApp1",role:"readWrite"},{db:"myApp2",role:"readWrite"}]})\n'\'' | mongo mongodb://userAdmin:userAdmin123456@some-name-mongos.demand-backup-sharded-18059.svc.cluster.local/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.XtLnWe11sG Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-mongos.demand-backup-sharded-18059.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb Implicit session: session { "id" : UUID("f33d8098-1eac-492c-b25c-cce516b2faf0") } Percona Server for MongoDB server version: v7.0.8-5 WARNING: shell and server versions do not match Successfully added user: { "user" : "myApp", "roles" : [ { "db" : "myApp", "role" : "readWrite" }, { "db" : "myApp1", "role" : "readWrite" }, { "db" : "myApp2", "role" : "readWrite" } ] } bye + cat /tmp/tmp.T31nCyEphA + rm /tmp/tmp.XtLnWe11sG /tmp/tmp.T31nCyEphA + return 0 + run_mongos 'sh.enableSharding("myApp","rs0")' clusterAdmin:clusterAdmin123456@some-name-mongos.demand-backup-sharded-18059 + local 'command=sh.enableSharding("myApp","rs0")' + local uri=clusterAdmin:clusterAdmin123456@some-name-mongos.demand-backup-sharded-18059 + local driver=mongodb + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.xUp4tkMfZl +++ mktemp ++ local LAST_ERR=/tmp/tmp.ihcY2GD98n ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.xUp4tkMfZl ++ cat /tmp/tmp.ihcY2GD98n ++ rm /tmp/tmp.xUp4tkMfZl /tmp/tmp.ihcY2GD98n ++ return 0 + local client_container=psmdb-client-7469665986-mlg9m + local mongo_flag= + kubectl_bin exec psmdb-client-7469665986-mlg9m -- bash -c 'printf '\''sh.enableSharding("myApp","rs0")\n'\'' | mongo mongodb://clusterAdmin:clusterAdmin123456@some-name-mongos.demand-backup-sharded-18059.svc.cluster.local/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.PPvLA5qVtT ++ mktemp + local LAST_ERR=/tmp/tmp.VEpzr77oC2 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-7469665986-mlg9m -- bash -c 'printf '\''sh.enableSharding("myApp","rs0")\n'\'' | mongo mongodb://clusterAdmin:clusterAdmin123456@some-name-mongos.demand-backup-sharded-18059.svc.cluster.local/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.PPvLA5qVtT Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-mongos.demand-backup-sharded-18059.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb Implicit session: session { "id" : UUID("589e66c4-c21c-482d-a9b5-f0977e56bdf4") } Percona Server for MongoDB server version: v7.0.8-5 WARNING: shell and server versions do not match { "ok" : 1, "$clusterTime" : { "clusterTime" : Timestamp(1715338261, 8), "signature" : { "hash" : BinData(0,"1grW8adMKCwptEl9lkUB+tuHdwY="), 
"keyId" : NumberLong("7367320328118206487") } }, "operationTime" : Timestamp(1715338261, 2) } bye + cat /tmp/tmp.VEpzr77oC2 + rm /tmp/tmp.PPvLA5qVtT /tmp/tmp.VEpzr77oC2 + return 0 + run_mongos 'sh.enableSharding("myApp1","rs1")' clusterAdmin:clusterAdmin123456@some-name-mongos.demand-backup-sharded-18059 + local 'command=sh.enableSharding("myApp1","rs1")' + local uri=clusterAdmin:clusterAdmin123456@some-name-mongos.demand-backup-sharded-18059 + local driver=mongodb + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.S2tqUXrl7H +++ mktemp ++ local LAST_ERR=/tmp/tmp.fv5ecqPEvA ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.S2tqUXrl7H ++ cat /tmp/tmp.fv5ecqPEvA ++ rm /tmp/tmp.S2tqUXrl7H /tmp/tmp.fv5ecqPEvA ++ return 0 + local client_container=psmdb-client-7469665986-mlg9m + local mongo_flag= + kubectl_bin exec psmdb-client-7469665986-mlg9m -- bash -c 'printf '\''sh.enableSharding("myApp1","rs1")\n'\'' | mongo mongodb://clusterAdmin:clusterAdmin123456@some-name-mongos.demand-backup-sharded-18059.svc.cluster.local/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.k6C9oqA5j8 ++ mktemp + local LAST_ERR=/tmp/tmp.g9HQRonov7 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-7469665986-mlg9m -- bash -c 'printf '\''sh.enableSharding("myApp1","rs1")\n'\'' | mongo mongodb://clusterAdmin:clusterAdmin123456@some-name-mongos.demand-backup-sharded-18059.svc.cluster.local/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.k6C9oqA5j8 Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-mongos.demand-backup-sharded-18059.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb Implicit session: session { "id" : UUID("fbeb7e7c-8749-4fa0-baa7-de8eaf542b68") } Percona Server for MongoDB server version: v7.0.8-5 WARNING: shell and server versions do not match { "ok" : 1, "$clusterTime" : { "clusterTime" : Timestamp(1715338265, 9), "signature" : { "hash" : BinData(0,"JloooVbPcLXEVeMFNN148skKBDA="), "keyId" : NumberLong("7367320328118206487") } }, "operationTime" : Timestamp(1715338265, 4) } bye + cat /tmp/tmp.g9HQRonov7 + rm /tmp/tmp.k6C9oqA5j8 /tmp/tmp.g9HQRonov7 + return 0 + run_mongos 'sh.enableSharding("myApp2","rs2")' clusterAdmin:clusterAdmin123456@some-name-mongos.demand-backup-sharded-18059 + local 'command=sh.enableSharding("myApp2","rs2")' + local uri=clusterAdmin:clusterAdmin123456@some-name-mongos.demand-backup-sharded-18059 + local driver=mongodb + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.IAr2AFEPFO +++ mktemp ++ local LAST_ERR=/tmp/tmp.XbSCingUWM ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.IAr2AFEPFO ++ cat /tmp/tmp.XbSCingUWM ++ rm /tmp/tmp.IAr2AFEPFO /tmp/tmp.XbSCingUWM ++ return 0 + local client_container=psmdb-client-7469665986-mlg9m + local mongo_flag= + kubectl_bin exec psmdb-client-7469665986-mlg9m -- bash -c 
'printf '\''sh.enableSharding("myApp2","rs2")\n'\'' | mongo mongodb://clusterAdmin:clusterAdmin123456@some-name-mongos.demand-backup-sharded-18059.svc.cluster.local/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.pVhRX6RfpS ++ mktemp + local LAST_ERR=/tmp/tmp.00s35ziIey + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-7469665986-mlg9m -- bash -c 'printf '\''sh.enableSharding("myApp2","rs2")\n'\'' | mongo mongodb://clusterAdmin:clusterAdmin123456@some-name-mongos.demand-backup-sharded-18059.svc.cluster.local/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.pVhRX6RfpS Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-mongos.demand-backup-sharded-18059.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb Implicit session: session { "id" : UUID("0833cd17-d42e-4834-ab3b-9954e323e57f") } Percona Server for MongoDB server version: v7.0.8-5 WARNING: shell and server versions do not match { "ok" : 1, "$clusterTime" : { "clusterTime" : Timestamp(1715338268, 8), "signature" : { "hash" : BinData(0,"Q1EZ7mIf3OV05/uQHhfaOagOctI="), "keyId" : NumberLong("7367320328118206487") } }, "operationTime" : Timestamp(1715338268, 3) } bye + cat /tmp/tmp.00s35ziIey + rm /tmp/tmp.pVhRX6RfpS /tmp/tmp.00s35ziIey + return 0 + insert_data_mongos 100500 myApp + local data=100500 + local db_name=myApp + local flags= + run_mongos 'use myApp\n db.test.insert({ x: 100500 })' myApp:myPass@some-name-mongos.demand-backup-sharded-18059 '' '' '' + local 'command=use myApp\n db.test.insert({ x: 100500 })' + local uri=myApp:myPass@some-name-mongos.demand-backup-sharded-18059 + local driver=mongodb + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.7Q9IyDaUVq +++ mktemp ++ local LAST_ERR=/tmp/tmp.EgBrDHa7N4 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.7Q9IyDaUVq ++ cat /tmp/tmp.EgBrDHa7N4 ++ rm /tmp/tmp.7Q9IyDaUVq /tmp/tmp.EgBrDHa7N4 ++ return 0 + local client_container=psmdb-client-7469665986-mlg9m + local mongo_flag= + kubectl_bin exec psmdb-client-7469665986-mlg9m -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.demand-backup-sharded-18059.svc.cluster.local/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.JLImwv78ET ++ mktemp + local LAST_ERR=/tmp/tmp.Nv7ldSprhS + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-7469665986-mlg9m -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.demand-backup-sharded-18059.svc.cluster.local/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.JLImwv78ET Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-mongos.demand-backup-sharded-18059.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb Implicit session: session { "id" : UUID("9808af53-08cf-472e-b683-92247cf1a79e") } Percona Server for MongoDB server version: v7.0.8-5 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat 
/tmp/tmp.Nv7ldSprhS + rm /tmp/tmp.JLImwv78ET /tmp/tmp.Nv7ldSprhS + return 0 + insert_data_mongos 100500 myApp1 + local data=100500 + local db_name=myApp1 + local flags= + run_mongos 'use myApp1\n db.test.insert({ x: 100500 })' myApp:myPass@some-name-mongos.demand-backup-sharded-18059 '' '' '' + local 'command=use myApp1\n db.test.insert({ x: 100500 })' + local uri=myApp:myPass@some-name-mongos.demand-backup-sharded-18059 + local driver=mongodb + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.iPQ9zzLo8b +++ mktemp ++ local LAST_ERR=/tmp/tmp.E5g0DHQDax ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.iPQ9zzLo8b ++ cat /tmp/tmp.E5g0DHQDax ++ rm /tmp/tmp.iPQ9zzLo8b /tmp/tmp.E5g0DHQDax ++ return 0 + local client_container=psmdb-client-7469665986-mlg9m + local mongo_flag= + kubectl_bin exec psmdb-client-7469665986-mlg9m -- bash -c 'printf '\''use myApp1\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.demand-backup-sharded-18059.svc.cluster.local/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.WawVNuC1wH ++ mktemp + local LAST_ERR=/tmp/tmp.BrgrQg1z5z + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-7469665986-mlg9m -- bash -c 'printf '\''use myApp1\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.demand-backup-sharded-18059.svc.cluster.local/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.WawVNuC1wH Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-mongos.demand-backup-sharded-18059.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb Implicit session: session { "id" : UUID("7f4a70e1-8f51-4a7c-955a-b9611ab5001d") } Percona Server for MongoDB server version: v7.0.8-5 WARNING: shell and server versions do not match switched to db myApp1 WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.BrgrQg1z5z + rm /tmp/tmp.WawVNuC1wH /tmp/tmp.BrgrQg1z5z + return 0 + insert_data_mongos 100500 myApp2 + local data=100500 + local db_name=myApp2 + local flags= + run_mongos 'use myApp2\n db.test.insert({ x: 100500 })' myApp:myPass@some-name-mongos.demand-backup-sharded-18059 '' '' '' + local 'command=use myApp2\n db.test.insert({ x: 100500 })' + local uri=myApp:myPass@some-name-mongos.demand-backup-sharded-18059 + local driver=mongodb + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.lpHSpb8pVE +++ mktemp ++ local LAST_ERR=/tmp/tmp.ujwHsqAyQV ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.lpHSpb8pVE ++ cat /tmp/tmp.ujwHsqAyQV ++ rm /tmp/tmp.lpHSpb8pVE /tmp/tmp.ujwHsqAyQV ++ return 0 + local client_container=psmdb-client-7469665986-mlg9m + local mongo_flag= + kubectl_bin exec psmdb-client-7469665986-mlg9m -- bash -c 'printf '\''use myApp2\n db.test.insert({ x: 100500 })\n'\'' | mongo 
mongodb://myApp:myPass@some-name-mongos.demand-backup-sharded-18059.svc.cluster.local/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.iqKjEk0AgC ++ mktemp + local LAST_ERR=/tmp/tmp.iY1xI3KNj5 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-7469665986-mlg9m -- bash -c 'printf '\''use myApp2\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.demand-backup-sharded-18059.svc.cluster.local/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.iqKjEk0AgC Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-mongos.demand-backup-sharded-18059.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb Implicit session: session { "id" : UUID("fc9c1f1f-9cbd-4ea0-ac8b-c6ba3e0dd6bf") } Percona Server for MongoDB server version: v7.0.8-5 WARNING: shell and server versions do not match switched to db myApp2 WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.iY1xI3KNj5 + rm /tmp/tmp.iqKjEk0AgC /tmp/tmp.iY1xI3KNj5 + return 0 + minikube_sleep + sleep_time=10 + [[ '' == 1 ]] + compare_mongos_cmd find myApp:myPass@some-name-mongos.demand-backup-sharded-18059 + local command=find + local uri=myApp:myPass@some-name-mongos.demand-backup-sharded-18059 + local postfix= + local suffix= + local database=myApp + local collection=test + run_mongos 'use myApp\n db.test.find()' myApp:myPass@some-name-mongos.demand-backup-sharded-18059 mongodb '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-mongos.demand-backup-sharded-18059 + local driver=mongodb + local suffix=.svc.cluster.local + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp + egrep -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' ++ local LAST_OUT=/tmp/tmp.uDXTVWV4cF +++ mktemp ++ local LAST_ERR=/tmp/tmp.F8TzCEmXKC ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.uDXTVWV4cF ++ cat /tmp/tmp.F8TzCEmXKC ++ rm /tmp/tmp.uDXTVWV4cF /tmp/tmp.F8TzCEmXKC ++ return 0 + local client_container=psmdb-client-7469665986-mlg9m + local mongo_flag= + kubectl_bin exec psmdb-client-7469665986-mlg9m -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.demand-backup-sharded-18059.svc.cluster.local/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.9MfsSQmkVx ++ mktemp + local LAST_ERR=/tmp/tmp.fgM6YQ380V + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-7469665986-mlg9m -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.demand-backup-sharded-18059.svc.cluster.local/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.9MfsSQmkVx + cat /tmp/tmp.fgM6YQ380V + rm /tmp/tmp.9MfsSQmkVx /tmp/tmp.fgM6YQ380V + return 0 + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1393/e2e-tests/demand-backup-sharded/compare/find.json /tmp/tmp.nvt7xvr20D/find + wait_backup_agent some-name-rs0-0 + local agent_pod=some-name-rs0-0 + set +o xtrace 
some-name-rs0-0 + wait_backup_agent some-name-rs0-1 + local agent_pod=some-name-rs0-1 + set +o xtrace some-name-rs0-1 + wait_backup_agent some-name-rs0-2 + local agent_pod=some-name-rs0-2 + set +o xtrace some-name-rs0-2 + wait_backup_agent some-name-rs1-0 + local agent_pod=some-name-rs1-0 + set +o xtrace some-name-rs1-0 + wait_backup_agent some-name-rs1-1 + local agent_pod=some-name-rs1-1 + set +o xtrace some-name-rs1-1 + wait_backup_agent some-name-rs1-2 + local agent_pod=some-name-rs1-2 + set +o xtrace some-name-rs1-2 + wait_backup_agent some-name-rs2-0 + local agent_pod=some-name-rs2-0 + set +o xtrace some-name-rs2-0 + wait_backup_agent some-name-rs2-1 + local agent_pod=some-name-rs2-1 + set +o xtrace some-name-rs2-1 + wait_backup_agent some-name-rs2-2 + local agent_pod=some-name-rs2-2 + set +o xtrace some-name-rs2-2 + backup_name_aws=backup-aws-s3 + backup_name_minio=backup-minio + backup_name_gcp=backup-gcp-cs + backup_name_azure=backup-azure-blob + desc 'run backups' + set +o xtrace ----------------------------------------------------------------------------------- run backups ----------------------------------------------------------------------------------- + run_backup minio + local storage=minio + local backup_name=backup-minio + desc 'run backup backup-minio' + set +o xtrace ----------------------------------------------------------------------------------- run backup backup-minio ----------------------------------------------------------------------------------- + yq eval '.metadata.name = "backup-minio" | .spec.storageName = "minio"' /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1393/e2e-tests/demand-backup-sharded/conf/backup-minio.yml + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.fJj2oeCF5h ++ mktemp + local LAST_ERR=/tmp/tmp.K57d4DRwnt + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.fJj2oeCF5h perconaservermongodbbackup.psmdb.percona.com/backup-minio created + cat /tmp/tmp.K57d4DRwnt + rm /tmp/tmp.fJj2oeCF5h /tmp/tmp.K57d4DRwnt + return 0 + '[' -z '' ']' + run_backup aws-s3 + local storage=aws-s3 + local backup_name=backup-aws-s3 + desc 'run backup backup-aws-s3' + set +o xtrace ----------------------------------------------------------------------------------- run backup backup-aws-s3 ----------------------------------------------------------------------------------- + kubectl_bin apply -f - + yq eval '.metadata.name = "backup-aws-s3" | .spec.storageName = "aws-s3"' /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1393/e2e-tests/demand-backup-sharded/conf/backup-aws-s3.yml ++ mktemp + local LAST_OUT=/tmp/tmp.kgLf7goksS ++ mktemp + local LAST_ERR=/tmp/tmp.cxlEVVtEDh + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.kgLf7goksS perconaservermongodbbackup.psmdb.percona.com/backup-aws-s3 created + cat /tmp/tmp.cxlEVVtEDh + rm /tmp/tmp.kgLf7goksS /tmp/tmp.cxlEVVtEDh + return 0 + run_backup gcp-cs + local storage=gcp-cs + local backup_name=backup-gcp-cs + desc 'run backup backup-gcp-cs' + set +o xtrace ----------------------------------------------------------------------------------- run backup backup-gcp-cs ----------------------------------------------------------------------------------- + kubectl_bin apply -f - + yq eval '.metadata.name = "backup-gcp-cs" | .spec.storageName = "gcp-cs"' 
/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1393/e2e-tests/demand-backup-sharded/conf/backup-gcp-cs.yml ++ mktemp + local LAST_OUT=/tmp/tmp.x5paXT8cHo ++ mktemp + local LAST_ERR=/tmp/tmp.KpScoaEBPl + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.x5paXT8cHo perconaservermongodbbackup.psmdb.percona.com/backup-gcp-cs created + cat /tmp/tmp.KpScoaEBPl + rm /tmp/tmp.x5paXT8cHo /tmp/tmp.KpScoaEBPl + return 0 + run_backup azure-blob + local storage=azure-blob + local backup_name=backup-azure-blob + desc 'run backup backup-azure-blob' + set +o xtrace ----------------------------------------------------------------------------------- run backup backup-azure-blob ----------------------------------------------------------------------------------- + yq eval '.metadata.name = "backup-azure-blob" | .spec.storageName = "azure-blob"' /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1393/e2e-tests/demand-backup-sharded/conf/backup-azure-blob.yml + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.n0ObNSnsiN ++ mktemp + local LAST_ERR=/tmp/tmp.pPxOqfBYlY + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.n0ObNSnsiN perconaservermongodbbackup.psmdb.percona.com/backup-azure-blob created + cat /tmp/tmp.pPxOqfBYlY + rm /tmp/tmp.n0ObNSnsiN /tmp/tmp.pPxOqfBYlY + return 0 + wait_backup backup-aws-s3 + local backup_name=backup-aws-s3 + set +o xtrace backup-aws-s3............................................ + wait_backup backup-gcp-cs + local backup_name=backup-gcp-cs + set +o xtrace backup-gcp-cs................. + wait_backup backup-azure-blob + local backup_name=backup-azure-blob + set +o xtrace backup-azure-blob................... + wait_backup backup-minio + local backup_name=backup-minio + set +o xtrace backup-minio. 
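The four backups above are created declaratively: the test renders a PerconaServerMongoDBBackup manifest with yq, setting only the backup name and storageName, applies it, and then polls the object until it completes. A minimal sketch of that pattern, assuming the conf/backup-minio.yml template used by this test and that the backup CR exposes a .status.state field reaching "ready" (the trace itself polls with its own wait_backup helper rather than kubectl wait, so the wait command here is an illustrative alternative, not the test's code):

    # render the backup CR from the test template and submit it
    yq eval '.metadata.name = "backup-minio" | .spec.storageName = "minio"' \
        e2e-tests/demand-backup-sharded/conf/backup-minio.yml | kubectl apply -f -

    # hypothetical stand-in for the wait_backup polling loop shown in the trace
    kubectl wait psmdb-backup/backup-minio \
        --for=jsonpath='{.status.state}'=ready --timeout=900s
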
+ sleep 5 + '[' -z '' ']' + desc 'check backup and restore -- aws-s3' + set +o xtrace ----------------------------------------------------------------------------------- check backup and restore -- aws-s3 ----------------------------------------------------------------------------------- ++ get_backup_dest backup-aws-s3 ++ local backup_name=backup-aws-s3 ++ sed 's|azure://||' ++ kubectl_bin get psmdb-backup backup-aws-s3 -o 'jsonpath={.status.destination}' ++ sed -e 's/.json$//' ++ sed 's|s3://||' +++ mktemp ++ local LAST_OUT=/tmp/tmp.E0npQIvwE0 +++ mktemp ++ local LAST_ERR=/tmp/tmp.RqQGo27X70 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb-backup backup-aws-s3 -o 'jsonpath={.status.destination}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.E0npQIvwE0 ++ cat /tmp/tmp.RqQGo27X70 ++ rm /tmp/tmp.E0npQIvwE0 /tmp/tmp.RqQGo27X70 ++ return 0 + backup_dest_aws=operator-testing/psmdb-demand-backup-sharded/2024-05-10T10:52:44Z + gunzip + curl -s https://s3.amazonaws.com/operator-testing/psmdb-demand-backup-sharded/2024-05-10T10:52:44Z/rs0/myApp.test.gz + curl -s https://s3.amazonaws.com/operator-testing/psmdb-demand-backup-sharded/2024-05-10T10:52:44Z/rs1/myApp1.test.gz + gunzip + curl -s https://s3.amazonaws.com/operator-testing/psmdb-demand-backup-sharded/2024-05-10T10:52:44Z/rs2/myApp2.test.gz + gunzip + insert_data_mongos 100501 myApp + local data=100501 + local db_name=myApp + local flags= + run_mongos 'use myApp\n db.test.insert({ x: 100501 })' myApp:myPass@some-name-mongos.demand-backup-sharded-18059 '' '' '' + local 'command=use myApp\n db.test.insert({ x: 100501 })' + local uri=myApp:myPass@some-name-mongos.demand-backup-sharded-18059 + local driver=mongodb + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.4vY7Ocl1bX +++ mktemp ++ local LAST_ERR=/tmp/tmp.F3bmHJrCW3 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.4vY7Ocl1bX ++ cat /tmp/tmp.F3bmHJrCW3 ++ rm /tmp/tmp.4vY7Ocl1bX /tmp/tmp.F3bmHJrCW3 ++ return 0 + local client_container=psmdb-client-7469665986-mlg9m + local mongo_flag= + kubectl_bin exec psmdb-client-7469665986-mlg9m -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100501 })\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.demand-backup-sharded-18059.svc.cluster.local/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.37j6zinSVE ++ mktemp + local LAST_ERR=/tmp/tmp.9hFlBCxau6 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-7469665986-mlg9m -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100501 })\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.demand-backup-sharded-18059.svc.cluster.local/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.37j6zinSVE Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-mongos.demand-backup-sharded-18059.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb Implicit session: session { "id" : UUID("2e13527d-eda6-4790-aeed-36b5b7946cfd") } Percona Server for MongoDB server version: v7.0.8-5 WARNING: shell and server versions do not match switched to db myApp WriteResult({ 
"nInserted" : 1 }) bye + cat /tmp/tmp.9hFlBCxau6 + rm /tmp/tmp.37j6zinSVE /tmp/tmp.9hFlBCxau6 + return 0 + insert_data_mongos 100501 myApp1 + local data=100501 + local db_name=myApp1 + local flags= + run_mongos 'use myApp1\n db.test.insert({ x: 100501 })' myApp:myPass@some-name-mongos.demand-backup-sharded-18059 '' '' '' + local 'command=use myApp1\n db.test.insert({ x: 100501 })' + local uri=myApp:myPass@some-name-mongos.demand-backup-sharded-18059 + local driver=mongodb + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Gpjplohmev +++ mktemp ++ local LAST_ERR=/tmp/tmp.J45fqESb6T ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Gpjplohmev ++ cat /tmp/tmp.J45fqESb6T ++ rm /tmp/tmp.Gpjplohmev /tmp/tmp.J45fqESb6T ++ return 0 + local client_container=psmdb-client-7469665986-mlg9m + local mongo_flag= + kubectl_bin exec psmdb-client-7469665986-mlg9m -- bash -c 'printf '\''use myApp1\n db.test.insert({ x: 100501 })\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.demand-backup-sharded-18059.svc.cluster.local/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.1AfdwsGtPu ++ mktemp + local LAST_ERR=/tmp/tmp.gVnMffaq48 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-7469665986-mlg9m -- bash -c 'printf '\''use myApp1\n db.test.insert({ x: 100501 })\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.demand-backup-sharded-18059.svc.cluster.local/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.1AfdwsGtPu Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-mongos.demand-backup-sharded-18059.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb Implicit session: session { "id" : UUID("e48dfdda-9784-4754-9690-77858d14ae0f") } Percona Server for MongoDB server version: v7.0.8-5 WARNING: shell and server versions do not match switched to db myApp1 WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.gVnMffaq48 + rm /tmp/tmp.1AfdwsGtPu /tmp/tmp.gVnMffaq48 + return 0 + insert_data_mongos 100501 myApp2 + local data=100501 + local db_name=myApp2 + local flags= + run_mongos 'use myApp2\n db.test.insert({ x: 100501 })' myApp:myPass@some-name-mongos.demand-backup-sharded-18059 '' '' '' + local 'command=use myApp2\n db.test.insert({ x: 100501 })' + local uri=myApp:myPass@some-name-mongos.demand-backup-sharded-18059 + local driver=mongodb + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ZZeCIpSQH1 +++ mktemp ++ local LAST_ERR=/tmp/tmp.ScX4s9OwZw ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.ZZeCIpSQH1 ++ cat /tmp/tmp.ScX4s9OwZw ++ rm /tmp/tmp.ZZeCIpSQH1 /tmp/tmp.ScX4s9OwZw ++ return 0 + local client_container=psmdb-client-7469665986-mlg9m + local mongo_flag= + kubectl_bin exec psmdb-client-7469665986-mlg9m -- bash -c 'printf '\''use myApp2\n db.test.insert({ x: 100501 })\n'\'' | mongo 
mongodb://myApp:myPass@some-name-mongos.demand-backup-sharded-18059.svc.cluster.local/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.uB2jsOUB96 ++ mktemp + local LAST_ERR=/tmp/tmp.AS07o7nBqh + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-7469665986-mlg9m -- bash -c 'printf '\''use myApp2\n db.test.insert({ x: 100501 })\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.demand-backup-sharded-18059.svc.cluster.local/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.uB2jsOUB96 Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-mongos.demand-backup-sharded-18059.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb Implicit session: session { "id" : UUID("10f7930d-30c7-4efa-a594-4aa539af6079") } Percona Server for MongoDB server version: v7.0.8-5 WARNING: shell and server versions do not match switched to db myApp2 WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.AS07o7nBqh + rm /tmp/tmp.uB2jsOUB96 /tmp/tmp.AS07o7nBqh + return 0 + check_data -2nd + local postfix=-2nd ++ seq 0 2 + for i in '$(seq 0 2)' + compare_mongos_cmd find myApp:myPass@some-name-mongos.demand-backup-sharded-18059 -2nd .svc.cluster.local myApp test + local command=find + local uri=myApp:myPass@some-name-mongos.demand-backup-sharded-18059 + local postfix=-2nd + local suffix=.svc.cluster.local + local database=myApp + local collection=test + run_mongos 'use myApp\n db.test.find()' myApp:myPass@some-name-mongos.demand-backup-sharded-18059 mongodb .svc.cluster.local + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-mongos.demand-backup-sharded-18059 + local driver=mongodb + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' + egrep -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' ++ local LAST_OUT=/tmp/tmp.H9MyelqjaM +++ mktemp ++ local LAST_ERR=/tmp/tmp.j8nj9nA7z6 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.H9MyelqjaM ++ cat /tmp/tmp.j8nj9nA7z6 ++ rm /tmp/tmp.H9MyelqjaM /tmp/tmp.j8nj9nA7z6 ++ return 0 + local client_container=psmdb-client-7469665986-mlg9m + local mongo_flag= + kubectl_bin exec psmdb-client-7469665986-mlg9m -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.demand-backup-sharded-18059.svc.cluster.local/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.NZoYjHuTFd ++ mktemp + local LAST_ERR=/tmp/tmp.GH072SCZ1R + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-7469665986-mlg9m -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.demand-backup-sharded-18059.svc.cluster.local/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.NZoYjHuTFd + cat /tmp/tmp.GH072SCZ1R + rm /tmp/tmp.NZoYjHuTFd /tmp/tmp.GH072SCZ1R + return 0 + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1393/e2e-tests/demand-backup-sharded/compare/find-2nd.json 
/tmp/tmp.nvt7xvr20D/find-2nd + for i in '$(seq 0 2)' + compare_mongos_cmd find myApp:myPass@some-name-mongos.demand-backup-sharded-18059 -2nd1 .svc.cluster.local myApp1 test + local command=find + local uri=myApp:myPass@some-name-mongos.demand-backup-sharded-18059 + local postfix=-2nd1 + local suffix=.svc.cluster.local + local database=myApp1 + local collection=test + egrep -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + run_mongos 'use myApp1\n db.test.find()' myApp:myPass@some-name-mongos.demand-backup-sharded-18059 mongodb .svc.cluster.local + local 'command=use myApp1\n db.test.find()' + local uri=myApp:myPass@some-name-mongos.demand-backup-sharded-18059 + local driver=mongodb + local suffix=.svc.cluster.local + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.oggFn3PgiS +++ mktemp ++ local LAST_ERR=/tmp/tmp.cQgo5Sucil ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.oggFn3PgiS ++ cat /tmp/tmp.cQgo5Sucil ++ rm /tmp/tmp.oggFn3PgiS /tmp/tmp.cQgo5Sucil ++ return 0 + local client_container=psmdb-client-7469665986-mlg9m + local mongo_flag= + kubectl_bin exec psmdb-client-7469665986-mlg9m -- bash -c 'printf '\''use myApp1\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.demand-backup-sharded-18059.svc.cluster.local/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.VFYGiCADZU ++ mktemp + local LAST_ERR=/tmp/tmp.RB2YtMmnea + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-7469665986-mlg9m -- bash -c 'printf '\''use myApp1\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.demand-backup-sharded-18059.svc.cluster.local/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.VFYGiCADZU + cat /tmp/tmp.RB2YtMmnea + rm /tmp/tmp.VFYGiCADZU /tmp/tmp.RB2YtMmnea + return 0 + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1393/e2e-tests/demand-backup-sharded/compare/find-2nd1.json /tmp/tmp.nvt7xvr20D/find-2nd1 + for i in '$(seq 0 2)' + compare_mongos_cmd find myApp:myPass@some-name-mongos.demand-backup-sharded-18059 -2nd2 .svc.cluster.local myApp2 test + local command=find + local uri=myApp:myPass@some-name-mongos.demand-backup-sharded-18059 + local postfix=-2nd2 + local suffix=.svc.cluster.local + local database=myApp2 + local collection=test + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' + egrep -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + run_mongos 'use myApp2\n db.test.find()' myApp:myPass@some-name-mongos.demand-backup-sharded-18059 mongodb .svc.cluster.local + local 'command=use myApp2\n db.test.find()' + local uri=myApp:myPass@some-name-mongos.demand-backup-sharded-18059 + local driver=mongodb + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.9z3tPanRTp +++ mktemp ++ local 
LAST_ERR=/tmp/tmp.uevdb5aYYN ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.9z3tPanRTp ++ cat /tmp/tmp.uevdb5aYYN ++ rm /tmp/tmp.9z3tPanRTp /tmp/tmp.uevdb5aYYN ++ return 0 + local client_container=psmdb-client-7469665986-mlg9m + local mongo_flag= + kubectl_bin exec psmdb-client-7469665986-mlg9m -- bash -c 'printf '\''use myApp2\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.demand-backup-sharded-18059.svc.cluster.local/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.cZP81oUEyg ++ mktemp + local LAST_ERR=/tmp/tmp.wG44kJtfNC + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-7469665986-mlg9m -- bash -c 'printf '\''use myApp2\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.demand-backup-sharded-18059.svc.cluster.local/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.cZP81oUEyg + cat /tmp/tmp.wG44kJtfNC + rm /tmp/tmp.cZP81oUEyg /tmp/tmp.wG44kJtfNC + return 0 + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1393/e2e-tests/demand-backup-sharded/compare/find-2nd2.json /tmp/tmp.nvt7xvr20D/find-2nd2 + run_restore backup-aws-s3 + local backup_name=backup-aws-s3 + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1393/e2e-tests/demand-backup-sharded/conf/restore.yml + /usr/bin/sed -e 's/backupName:/backupName: backup-aws-s3/' + kubectl_bin apply -f - + /usr/bin/sed -e 's/name:/name: restore-backup-aws-s3/' ++ mktemp + local LAST_OUT=/tmp/tmp.lbKnUa0w8D ++ mktemp + local LAST_ERR=/tmp/tmp.c9RHTmj1xP + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.lbKnUa0w8D perconaservermongodbrestore.psmdb.percona.com/restore-backup-aws-s3 created + cat /tmp/tmp.c9RHTmj1xP + rm /tmp/tmp.lbKnUa0w8D /tmp/tmp.c9RHTmj1xP + return 0 + wait_restore backup-aws-s3 some-name + local backup_name=backup-aws-s3 + local cluster_name=some-name + local target_state=ready + local wait_cluster_consistency=1 + local wait_time=780 + set +o xtrace waiting psmdb-restore/backup-aws-s3 to reach ready state............. + '[' 1 -eq 1 ']' + wait_cluster_consistency some-name + local cluster_name=some-name + local wait_time=32 + retry=0 + sleep 7 + echo -n 'waiting for cluster readyness' waiting for cluster readyness++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.iHvXatqIOf +++ mktemp ++ local LAST_ERR=/tmp/tmp.z9KQLOODmF ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.iHvXatqIOf ++ cat /tmp/tmp.z9KQLOODmF ++ rm /tmp/tmp.iHvXatqIOf /tmp/tmp.z9KQLOODmF ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 1 -ge 32 ']' + echo -n . 
.+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.nZYGfvLE6U +++ mktemp ++ local LAST_ERR=/tmp/tmp.EyFmrK4qMh ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.nZYGfvLE6U ++ cat /tmp/tmp.EyFmrK4qMh ++ rm /tmp/tmp.nZYGfvLE6U /tmp/tmp.EyFmrK4qMh ++ return 0 + [[ error == \r\e\a\d\y ]] + let retry+=1 + '[' 2 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.RBCUEWeB0V +++ mktemp ++ local LAST_ERR=/tmp/tmp.rG9VDeWwhG ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.RBCUEWeB0V ++ cat /tmp/tmp.rG9VDeWwhG ++ rm /tmp/tmp.RBCUEWeB0V /tmp/tmp.rG9VDeWwhG ++ return 0 + [[ error == \r\e\a\d\y ]] + let retry+=1 + '[' 3 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.CBeeOX2AqX +++ mktemp ++ local LAST_ERR=/tmp/tmp.nW2a6kv1DZ ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.CBeeOX2AqX ++ cat /tmp/tmp.nW2a6kv1DZ ++ rm /tmp/tmp.CBeeOX2AqX /tmp/tmp.nW2a6kv1DZ ++ return 0 + [[ error == \r\e\a\d\y ]] + let retry+=1 + '[' 4 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.aFRqrDV86m +++ mktemp ++ local LAST_ERR=/tmp/tmp.2Q2AdUGVAq ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.aFRqrDV86m ++ cat /tmp/tmp.2Q2AdUGVAq ++ rm /tmp/tmp.aFRqrDV86m /tmp/tmp.2Q2AdUGVAq ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 5 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.yGj4Liv0UA +++ mktemp ++ local LAST_ERR=/tmp/tmp.R43FZmSYD5 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.yGj4Liv0UA ++ cat /tmp/tmp.R43FZmSYD5 ++ rm /tmp/tmp.yGj4Liv0UA /tmp/tmp.R43FZmSYD5 ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 6 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.PbWr6I35AE +++ mktemp ++ local LAST_ERR=/tmp/tmp.OGagmqnXkI ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.PbWr6I35AE ++ cat /tmp/tmp.OGagmqnXkI ++ rm /tmp/tmp.PbWr6I35AE /tmp/tmp.OGagmqnXkI ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 7 -ge 32 ']' + echo -n . 
.+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.0XRkN7ce42 +++ mktemp ++ local LAST_ERR=/tmp/tmp.FULWDvNcIY ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.0XRkN7ce42 ++ cat /tmp/tmp.FULWDvNcIY ++ rm /tmp/tmp.0XRkN7ce42 /tmp/tmp.FULWDvNcIY ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 8 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.wmcHGbUryf +++ mktemp ++ local LAST_ERR=/tmp/tmp.F8IVQmIxJ8 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.wmcHGbUryf ++ cat /tmp/tmp.F8IVQmIxJ8 ++ rm /tmp/tmp.wmcHGbUryf /tmp/tmp.F8IVQmIxJ8 ++ return 0 + [[ ready == \r\e\a\d\y ]] + check_data + local postfix= ++ seq 0 2 + for i in '$(seq 0 2)' + compare_mongos_cmd find myApp:myPass@some-name-mongos.demand-backup-sharded-18059 '' .svc.cluster.local myApp test + local command=find + local uri=myApp:myPass@some-name-mongos.demand-backup-sharded-18059 + local postfix= + local suffix=.svc.cluster.local + local database=myApp + local collection=test + run_mongos 'use myApp\n db.test.find()' myApp:myPass@some-name-mongos.demand-backup-sharded-18059 mongodb .svc.cluster.local + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-mongos.demand-backup-sharded-18059 + local driver=mongodb + egrep -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + local suffix=.svc.cluster.local + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.KvHRHu99gV +++ mktemp ++ local LAST_ERR=/tmp/tmp.MSff8a4fDY ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.KvHRHu99gV ++ cat /tmp/tmp.MSff8a4fDY ++ rm /tmp/tmp.KvHRHu99gV /tmp/tmp.MSff8a4fDY ++ return 0 + local client_container=psmdb-client-7469665986-mlg9m + local mongo_flag= + kubectl_bin exec psmdb-client-7469665986-mlg9m -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.demand-backup-sharded-18059.svc.cluster.local/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.u2fbRboD1c ++ mktemp + local LAST_ERR=/tmp/tmp.iK4t8fH6pC + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-7469665986-mlg9m -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.demand-backup-sharded-18059.svc.cluster.local/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.u2fbRboD1c + cat /tmp/tmp.iK4t8fH6pC + rm /tmp/tmp.u2fbRboD1c /tmp/tmp.iK4t8fH6pC + return 0 + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1393/e2e-tests/demand-backup-sharded/compare/find.json 
/tmp/tmp.nvt7xvr20D/find + for i in '$(seq 0 2)' + compare_mongos_cmd find myApp:myPass@some-name-mongos.demand-backup-sharded-18059 1 .svc.cluster.local myApp1 test + local command=find + local uri=myApp:myPass@some-name-mongos.demand-backup-sharded-18059 + local postfix=1 + local suffix=.svc.cluster.local + local database=myApp1 + local collection=test + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' + run_mongos 'use myApp1\n db.test.find()' myApp:myPass@some-name-mongos.demand-backup-sharded-18059 mongodb .svc.cluster.local + egrep -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + local 'command=use myApp1\n db.test.find()' + local uri=myApp:myPass@some-name-mongos.demand-backup-sharded-18059 + local driver=mongodb + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Ou7VUv2NYz +++ mktemp ++ local LAST_ERR=/tmp/tmp.CzJYs4DTCA ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Ou7VUv2NYz ++ cat /tmp/tmp.CzJYs4DTCA ++ rm /tmp/tmp.Ou7VUv2NYz /tmp/tmp.CzJYs4DTCA ++ return 0 + local client_container=psmdb-client-7469665986-mlg9m + local mongo_flag= + kubectl_bin exec psmdb-client-7469665986-mlg9m -- bash -c 'printf '\''use myApp1\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.demand-backup-sharded-18059.svc.cluster.local/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.4PK5t4Iemj ++ mktemp + local LAST_ERR=/tmp/tmp.a6MRx3Fk9u + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-7469665986-mlg9m -- bash -c 'printf '\''use myApp1\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.demand-backup-sharded-18059.svc.cluster.local/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.4PK5t4Iemj + cat /tmp/tmp.a6MRx3Fk9u + rm /tmp/tmp.4PK5t4Iemj /tmp/tmp.a6MRx3Fk9u + return 0 + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1393/e2e-tests/demand-backup-sharded/compare/find1.json /tmp/tmp.nvt7xvr20D/find1 + for i in '$(seq 0 2)' + compare_mongos_cmd find myApp:myPass@some-name-mongos.demand-backup-sharded-18059 2 .svc.cluster.local myApp2 test + local command=find + local uri=myApp:myPass@some-name-mongos.demand-backup-sharded-18059 + local postfix=2 + local suffix=.svc.cluster.local + local database=myApp2 + local collection=test + run_mongos 'use myApp2\n db.test.find()' myApp:myPass@some-name-mongos.demand-backup-sharded-18059 mongodb .svc.cluster.local + local 'command=use myApp2\n db.test.find()' + local uri=myApp:myPass@some-name-mongos.demand-backup-sharded-18059 + local driver=mongodb + local suffix=.svc.cluster.local + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' + egrep -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Npf7ggwZMt +++ mktemp ++ local LAST_ERR=/tmp/tmp.8ByxwOC3Gh ++ local 
exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Npf7ggwZMt ++ cat /tmp/tmp.8ByxwOC3Gh ++ rm /tmp/tmp.Npf7ggwZMt /tmp/tmp.8ByxwOC3Gh ++ return 0 + local client_container=psmdb-client-7469665986-mlg9m + local mongo_flag= + kubectl_bin exec psmdb-client-7469665986-mlg9m -- bash -c 'printf '\''use myApp2\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.demand-backup-sharded-18059.svc.cluster.local/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.m6QaHGrlIi ++ mktemp + local LAST_ERR=/tmp/tmp.SaI7qsqSAY + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-7469665986-mlg9m -- bash -c 'printf '\''use myApp2\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.demand-backup-sharded-18059.svc.cluster.local/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.m6QaHGrlIi + cat /tmp/tmp.SaI7qsqSAY + rm /tmp/tmp.m6QaHGrlIi /tmp/tmp.SaI7qsqSAY + return 0 + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1393/e2e-tests/demand-backup-sharded/compare/find2.json /tmp/tmp.nvt7xvr20D/find2 + desc 'check backup and restore -- gcp-cs' + set +o xtrace ----------------------------------------------------------------------------------- check backup and restore -- gcp-cs ----------------------------------------------------------------------------------- ++ get_backup_dest backup-gcp-cs ++ local backup_name=backup-gcp-cs ++ kubectl_bin get psmdb-backup backup-gcp-cs -o 'jsonpath={.status.destination}' ++ sed -e 's/.json$//' +++ mktemp ++ sed 's|azure://||' ++ sed 's|s3://||' ++ local LAST_OUT=/tmp/tmp.zItGRigqVH +++ mktemp ++ local LAST_ERR=/tmp/tmp.Y9dDr7jnKM ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb-backup backup-gcp-cs -o 'jsonpath={.status.destination}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.zItGRigqVH ++ cat /tmp/tmp.Y9dDr7jnKM ++ rm /tmp/tmp.zItGRigqVH /tmp/tmp.Y9dDr7jnKM ++ return 0 + backup_dest_gcp=operator-testing/psmdb-demand-backup-sharded/2024-05-10T10:53:27Z + curl -s https://storage.googleapis.com/operator-testing/psmdb-demand-backup-sharded/2024-05-10T10:53:27Z/rs0/myApp.test.gz + gunzip + gunzip + curl -s https://storage.googleapis.com/operator-testing/psmdb-demand-backup-sharded/2024-05-10T10:53:27Z/rs1/myApp1.test.gz + curl -s https://storage.googleapis.com/operator-testing/psmdb-demand-backup-sharded/2024-05-10T10:53:27Z/rs2/myApp2.test.gz + gunzip + insert_data_mongos 100501 myApp + local data=100501 + local db_name=myApp + local flags= + run_mongos 'use myApp\n db.test.insert({ x: 100501 })' myApp:myPass@some-name-mongos.demand-backup-sharded-18059 '' '' '' + local 'command=use myApp\n db.test.insert({ x: 100501 })' + local uri=myApp:myPass@some-name-mongos.demand-backup-sharded-18059 + local driver=mongodb + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.LwDRGGP5ua +++ mktemp ++ local LAST_ERR=/tmp/tmp.YVJbGcQXpO ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ 
cat /tmp/tmp.LwDRGGP5ua ++ cat /tmp/tmp.YVJbGcQXpO ++ rm /tmp/tmp.LwDRGGP5ua /tmp/tmp.YVJbGcQXpO ++ return 0 + local client_container=psmdb-client-7469665986-mlg9m + local mongo_flag= + kubectl_bin exec psmdb-client-7469665986-mlg9m -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100501 })\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.demand-backup-sharded-18059.svc.cluster.local/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.1afbJMEITJ ++ mktemp + local LAST_ERR=/tmp/tmp.4KV6ObINpS + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-7469665986-mlg9m -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100501 })\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.demand-backup-sharded-18059.svc.cluster.local/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.1afbJMEITJ Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-mongos.demand-backup-sharded-18059.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb Implicit session: session { "id" : UUID("afb08646-cce1-428e-b917-b78c103c8982") } Percona Server for MongoDB server version: v7.0.8-5 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.4KV6ObINpS + rm /tmp/tmp.1afbJMEITJ /tmp/tmp.4KV6ObINpS + return 0 + insert_data_mongos 100501 myApp1 + local data=100501 + local db_name=myApp1 + local flags= + run_mongos 'use myApp1\n db.test.insert({ x: 100501 })' myApp:myPass@some-name-mongos.demand-backup-sharded-18059 '' '' '' + local 'command=use myApp1\n db.test.insert({ x: 100501 })' + local uri=myApp:myPass@some-name-mongos.demand-backup-sharded-18059 + local driver=mongodb + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.dUA6rrqJ59 +++ mktemp ++ local LAST_ERR=/tmp/tmp.aSiwEjrJiw ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.dUA6rrqJ59 ++ cat /tmp/tmp.aSiwEjrJiw ++ rm /tmp/tmp.dUA6rrqJ59 /tmp/tmp.aSiwEjrJiw ++ return 0 + local client_container=psmdb-client-7469665986-mlg9m + local mongo_flag= + kubectl_bin exec psmdb-client-7469665986-mlg9m -- bash -c 'printf '\''use myApp1\n db.test.insert({ x: 100501 })\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.demand-backup-sharded-18059.svc.cluster.local/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.xQRd1RXG4O ++ mktemp + local LAST_ERR=/tmp/tmp.F3p0zdMMf0 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-7469665986-mlg9m -- bash -c 'printf '\''use myApp1\n db.test.insert({ x: 100501 })\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.demand-backup-sharded-18059.svc.cluster.local/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.xQRd1RXG4O Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-mongos.demand-backup-sharded-18059.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb Implicit session: session { "id" : UUID("0931b0b1-0b8b-4d08-88ac-d350e4cc6ad0") } Percona Server for MongoDB server version: v7.0.8-5 WARNING: shell and server versions do not match switched to db myApp1 
WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.F3p0zdMMf0 + rm /tmp/tmp.xQRd1RXG4O /tmp/tmp.F3p0zdMMf0 + return 0 + insert_data_mongos 100501 myApp2 + local data=100501 + local db_name=myApp2 + local flags= + run_mongos 'use myApp2\n db.test.insert({ x: 100501 })' myApp:myPass@some-name-mongos.demand-backup-sharded-18059 '' '' '' + local 'command=use myApp2\n db.test.insert({ x: 100501 })' + local uri=myApp:myPass@some-name-mongos.demand-backup-sharded-18059 + local driver=mongodb + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.MIvG2o0x4p +++ mktemp ++ local LAST_ERR=/tmp/tmp.xjcOkodMBD ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.MIvG2o0x4p ++ cat /tmp/tmp.xjcOkodMBD ++ rm /tmp/tmp.MIvG2o0x4p /tmp/tmp.xjcOkodMBD ++ return 0 + local client_container=psmdb-client-7469665986-mlg9m + local mongo_flag= + kubectl_bin exec psmdb-client-7469665986-mlg9m -- bash -c 'printf '\''use myApp2\n db.test.insert({ x: 100501 })\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.demand-backup-sharded-18059.svc.cluster.local/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.OJ7YzrtiyE ++ mktemp + local LAST_ERR=/tmp/tmp.MkrATMQqL0 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-7469665986-mlg9m -- bash -c 'printf '\''use myApp2\n db.test.insert({ x: 100501 })\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.demand-backup-sharded-18059.svc.cluster.local/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.OJ7YzrtiyE Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-mongos.demand-backup-sharded-18059.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb Implicit session: session { "id" : UUID("5bb35ccf-475f-44ad-a649-ea48a74858dc") } Percona Server for MongoDB server version: v7.0.8-5 WARNING: shell and server versions do not match switched to db myApp2 WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.MkrATMQqL0 + rm /tmp/tmp.OJ7YzrtiyE /tmp/tmp.MkrATMQqL0 + return 0 + check_data -2nd + local postfix=-2nd ++ seq 0 2 + for i in '$(seq 0 2)' + compare_mongos_cmd find myApp:myPass@some-name-mongos.demand-backup-sharded-18059 -2nd .svc.cluster.local myApp test + local command=find + local uri=myApp:myPass@some-name-mongos.demand-backup-sharded-18059 + local postfix=-2nd + local suffix=.svc.cluster.local + local database=myApp + local collection=test + run_mongos 'use myApp\n db.test.find()' myApp:myPass@some-name-mongos.demand-backup-sharded-18059 mongodb .svc.cluster.local + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-mongos.demand-backup-sharded-18059 + local driver=mongodb + local suffix=.svc.cluster.local + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' + egrep -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.xmLZEixd2K +++ mktemp ++ local LAST_ERR=/tmp/tmp.M2N6YIxDuc ++ local exit_status=0 ++ local 
timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.xmLZEixd2K ++ cat /tmp/tmp.M2N6YIxDuc ++ rm /tmp/tmp.xmLZEixd2K /tmp/tmp.M2N6YIxDuc ++ return 0 + local client_container=psmdb-client-7469665986-mlg9m + local mongo_flag= + kubectl_bin exec psmdb-client-7469665986-mlg9m -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.demand-backup-sharded-18059.svc.cluster.local/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.Bv5aUYjPZP ++ mktemp + local LAST_ERR=/tmp/tmp.XqRYVVSq0Q + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-7469665986-mlg9m -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.demand-backup-sharded-18059.svc.cluster.local/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Bv5aUYjPZP + cat /tmp/tmp.XqRYVVSq0Q + rm /tmp/tmp.Bv5aUYjPZP /tmp/tmp.XqRYVVSq0Q + return 0 + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1393/e2e-tests/demand-backup-sharded/compare/find-2nd.json /tmp/tmp.nvt7xvr20D/find-2nd + for i in '$(seq 0 2)' + compare_mongos_cmd find myApp:myPass@some-name-mongos.demand-backup-sharded-18059 -2nd1 .svc.cluster.local myApp1 test + local command=find + local uri=myApp:myPass@some-name-mongos.demand-backup-sharded-18059 + local postfix=-2nd1 + local suffix=.svc.cluster.local + local database=myApp1 + local collection=test + run_mongos 'use myApp1\n db.test.find()' myApp:myPass@some-name-mongos.demand-backup-sharded-18059 mongodb .svc.cluster.local + local 'command=use myApp1\n db.test.find()' + egrep -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + local uri=myApp:myPass@some-name-mongos.demand-backup-sharded-18059 + local driver=mongodb + local suffix=.svc.cluster.local + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ygz2HQyT7o +++ mktemp ++ local LAST_ERR=/tmp/tmp.3x7mNX6X3A ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.ygz2HQyT7o ++ cat /tmp/tmp.3x7mNX6X3A ++ rm /tmp/tmp.ygz2HQyT7o /tmp/tmp.3x7mNX6X3A ++ return 0 + local client_container=psmdb-client-7469665986-mlg9m + local mongo_flag= + kubectl_bin exec psmdb-client-7469665986-mlg9m -- bash -c 'printf '\''use myApp1\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.demand-backup-sharded-18059.svc.cluster.local/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.fSznObFzDh ++ mktemp + local LAST_ERR=/tmp/tmp.1SckA2lKCS + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-7469665986-mlg9m -- bash -c 'printf '\''use myApp1\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.demand-backup-sharded-18059.svc.cluster.local/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.fSznObFzDh + cat /tmp/tmp.1SckA2lKCS + rm /tmp/tmp.fSznObFzDh 
/tmp/tmp.1SckA2lKCS + return 0 + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1393/e2e-tests/demand-backup-sharded/compare/find-2nd1.json /tmp/tmp.nvt7xvr20D/find-2nd1 + for i in '$(seq 0 2)' + compare_mongos_cmd find myApp:myPass@some-name-mongos.demand-backup-sharded-18059 -2nd2 .svc.cluster.local myApp2 test + local command=find + local uri=myApp:myPass@some-name-mongos.demand-backup-sharded-18059 + local postfix=-2nd2 + local suffix=.svc.cluster.local + local database=myApp2 + local collection=test + run_mongos 'use myApp2\n db.test.find()' myApp:myPass@some-name-mongos.demand-backup-sharded-18059 mongodb .svc.cluster.local + egrep -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + local 'command=use myApp2\n db.test.find()' + local uri=myApp:myPass@some-name-mongos.demand-backup-sharded-18059 + local driver=mongodb + local suffix=.svc.cluster.local + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ZyuLXI2gRs +++ mktemp ++ local LAST_ERR=/tmp/tmp.GpdKv4PkKr ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.ZyuLXI2gRs ++ cat /tmp/tmp.GpdKv4PkKr ++ rm /tmp/tmp.ZyuLXI2gRs /tmp/tmp.GpdKv4PkKr ++ return 0 + local client_container=psmdb-client-7469665986-mlg9m + local mongo_flag= + kubectl_bin exec psmdb-client-7469665986-mlg9m -- bash -c 'printf '\''use myApp2\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.demand-backup-sharded-18059.svc.cluster.local/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.CaeUtCiKA1 ++ mktemp + local LAST_ERR=/tmp/tmp.Lcrm3tUsXa + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-7469665986-mlg9m -- bash -c 'printf '\''use myApp2\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.demand-backup-sharded-18059.svc.cluster.local/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.CaeUtCiKA1 + cat /tmp/tmp.Lcrm3tUsXa + rm /tmp/tmp.CaeUtCiKA1 /tmp/tmp.Lcrm3tUsXa + return 0 + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1393/e2e-tests/demand-backup-sharded/compare/find-2nd2.json /tmp/tmp.nvt7xvr20D/find-2nd2 + run_restore backup-gcp-cs + local backup_name=backup-gcp-cs + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1393/e2e-tests/demand-backup-sharded/conf/restore.yml + /usr/bin/sed -e 's/name:/name: restore-backup-gcp-cs/' + /usr/bin/sed -e 's/backupName:/backupName: backup-gcp-cs/' + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.RBAUVDhsgr ++ mktemp + local LAST_ERR=/tmp/tmp.aM5cEvgIHF + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.RBAUVDhsgr perconaservermongodbrestore.psmdb.percona.com/restore-backup-gcp-cs created + cat /tmp/tmp.aM5cEvgIHF + rm /tmp/tmp.RBAUVDhsgr /tmp/tmp.aM5cEvgIHF + return 0 + wait_restore backup-gcp-cs some-name + local backup_name=backup-gcp-cs + local cluster_name=some-name + local target_state=ready + local wait_cluster_consistency=1 + local wait_time=780 + 
set +o xtrace waiting psmdb-restore/backup-gcp-cs to reach ready state........... + '[' 1 -eq 1 ']' + wait_cluster_consistency some-name + local cluster_name=some-name + local wait_time=32 + retry=0 + sleep 7 + echo -n 'waiting for cluster readyness' waiting for cluster readyness++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.QN8zV48vbR +++ mktemp ++ local LAST_ERR=/tmp/tmp.X0brdcf1yC ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.QN8zV48vbR ++ cat /tmp/tmp.X0brdcf1yC ++ rm /tmp/tmp.QN8zV48vbR /tmp/tmp.X0brdcf1yC ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 1 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.yQzfPRrbrz +++ mktemp ++ local LAST_ERR=/tmp/tmp.dlXDjiihDX ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.yQzfPRrbrz ++ cat /tmp/tmp.dlXDjiihDX ++ rm /tmp/tmp.yQzfPRrbrz /tmp/tmp.dlXDjiihDX ++ return 0 + [[ error == \r\e\a\d\y ]] + let retry+=1 + '[' 2 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.qxn1HhJUhb +++ mktemp ++ local LAST_ERR=/tmp/tmp.SBQR3L4t2C ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.qxn1HhJUhb ++ cat /tmp/tmp.SBQR3L4t2C ++ rm /tmp/tmp.qxn1HhJUhb /tmp/tmp.SBQR3L4t2C ++ return 0 + [[ error == \r\e\a\d\y ]] + let retry+=1 + '[' 3 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.8WzNVDL4FV +++ mktemp ++ local LAST_ERR=/tmp/tmp.I8fYOI1rSY ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.8WzNVDL4FV ++ cat /tmp/tmp.I8fYOI1rSY ++ rm /tmp/tmp.8WzNVDL4FV /tmp/tmp.I8fYOI1rSY ++ return 0 + [[ error == \r\e\a\d\y ]] + let retry+=1 + '[' 4 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.nEB6tZ9zfW +++ mktemp ++ local LAST_ERR=/tmp/tmp.cXkRg4oxWX ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.nEB6tZ9zfW ++ cat /tmp/tmp.cXkRg4oxWX ++ rm /tmp/tmp.nEB6tZ9zfW /tmp/tmp.cXkRg4oxWX ++ return 0 + [[ error == \r\e\a\d\y ]] + let retry+=1 + '[' 5 -ge 32 ']' + echo -n . 
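Once the cluster reports ready, check_data re-runs db.test.find() through mongos for myApp, myApp1 and myApp2 and diffs the filtered output against the stored compare/*.json files, exactly as in the blocks that follow. Below is a hedged sketch of one such comparison; the client pod name, connection string and filter pipeline are copied from this log, while the /tmp output path is chosen only for illustration:

# sketch of a single compare_mongos_cmd-style check
compare_find() {
    local db="$1" expected="$2"
    local pod=psmdb-client-7469665986-mlg9m
    local uri='mongodb://myApp:myPass@some-name-mongos.demand-backup-sharded-18059.svc.cluster.local/admin'
    kubectl exec "$pod" -- bash -c "printf 'use ${db}\n db.test.find()\n' | mongo '${uri}'" \
        | egrep -v 'I NETWORK|W NETWORK|Percona Server for MongoDB|connecting to:|Implicit session:|versions do not match|Error saving history file' \
        | sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' \
        > "/tmp/actual-${db}"
    diff "$expected" "/tmp/actual-${db}"
}
# usage: compare_find myApp1 /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1393/e2e-tests/demand-backup-sharded/compare/find1.json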
.+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.QXiH87xXzk +++ mktemp ++ local LAST_ERR=/tmp/tmp.wQzPbsrHK4 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.QXiH87xXzk ++ cat /tmp/tmp.wQzPbsrHK4 ++ rm /tmp/tmp.QXiH87xXzk /tmp/tmp.wQzPbsrHK4 ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 6 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.XvjTRbOKbf +++ mktemp ++ local LAST_ERR=/tmp/tmp.r2F3xhoNJQ ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.XvjTRbOKbf ++ cat /tmp/tmp.r2F3xhoNJQ ++ rm /tmp/tmp.XvjTRbOKbf /tmp/tmp.r2F3xhoNJQ ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 7 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ttlf9qQkem +++ mktemp ++ local LAST_ERR=/tmp/tmp.2tWJD2Rmll ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.ttlf9qQkem ++ cat /tmp/tmp.2tWJD2Rmll ++ rm /tmp/tmp.ttlf9qQkem /tmp/tmp.2tWJD2Rmll ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 8 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.LpYbhqpUEY +++ mktemp ++ local LAST_ERR=/tmp/tmp.LzRVuFnViF ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.LpYbhqpUEY ++ cat /tmp/tmp.LzRVuFnViF ++ rm /tmp/tmp.LpYbhqpUEY /tmp/tmp.LzRVuFnViF ++ return 0 + [[ ready == \r\e\a\d\y ]] + check_data + local postfix= ++ seq 0 2 + for i in '$(seq 0 2)' + compare_mongos_cmd find myApp:myPass@some-name-mongos.demand-backup-sharded-18059 '' .svc.cluster.local myApp test + local command=find + local uri=myApp:myPass@some-name-mongos.demand-backup-sharded-18059 + local postfix= + local suffix=.svc.cluster.local + local database=myApp + local collection=test + run_mongos 'use myApp\n db.test.find()' myApp:myPass@some-name-mongos.demand-backup-sharded-18059 mongodb .svc.cluster.local + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-mongos.demand-backup-sharded-18059 + local driver=mongodb + local suffix=.svc.cluster.local + egrep -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.7czPCTjS8n +++ mktemp ++ local LAST_ERR=/tmp/tmp.ruAtmaAoYl ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 
'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.7czPCTjS8n ++ cat /tmp/tmp.ruAtmaAoYl ++ rm /tmp/tmp.7czPCTjS8n /tmp/tmp.ruAtmaAoYl ++ return 0 + local client_container=psmdb-client-7469665986-mlg9m + local mongo_flag= + kubectl_bin exec psmdb-client-7469665986-mlg9m -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.demand-backup-sharded-18059.svc.cluster.local/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.wLGCFzcPKV ++ mktemp + local LAST_ERR=/tmp/tmp.nGt1LupmNj + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-7469665986-mlg9m -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.demand-backup-sharded-18059.svc.cluster.local/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.wLGCFzcPKV + cat /tmp/tmp.nGt1LupmNj + rm /tmp/tmp.wLGCFzcPKV /tmp/tmp.nGt1LupmNj + return 0 + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1393/e2e-tests/demand-backup-sharded/compare/find.json /tmp/tmp.nvt7xvr20D/find + for i in '$(seq 0 2)' + compare_mongos_cmd find myApp:myPass@some-name-mongos.demand-backup-sharded-18059 1 .svc.cluster.local myApp1 test + local command=find + local uri=myApp:myPass@some-name-mongos.demand-backup-sharded-18059 + local postfix=1 + local suffix=.svc.cluster.local + local database=myApp1 + local collection=test + egrep -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + run_mongos 'use myApp1\n db.test.find()' myApp:myPass@some-name-mongos.demand-backup-sharded-18059 mongodb .svc.cluster.local + local 'command=use myApp1\n db.test.find()' + local uri=myApp:myPass@some-name-mongos.demand-backup-sharded-18059 + local driver=mongodb + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ local LAST_OUT=/tmp/tmp.Q1QlXiN1ps +++ mktemp ++ local LAST_ERR=/tmp/tmp.T6hFiJXSCi ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Q1QlXiN1ps ++ cat /tmp/tmp.T6hFiJXSCi ++ rm /tmp/tmp.Q1QlXiN1ps /tmp/tmp.T6hFiJXSCi ++ return 0 + local client_container=psmdb-client-7469665986-mlg9m + local mongo_flag= + kubectl_bin exec psmdb-client-7469665986-mlg9m -- bash -c 'printf '\''use myApp1\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.demand-backup-sharded-18059.svc.cluster.local/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.GilsF1iRS3 ++ mktemp + local LAST_ERR=/tmp/tmp.wjTAKSuqSZ + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-7469665986-mlg9m -- bash -c 'printf '\''use myApp1\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.demand-backup-sharded-18059.svc.cluster.local/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.GilsF1iRS3 + cat /tmp/tmp.wjTAKSuqSZ + rm /tmp/tmp.GilsF1iRS3 /tmp/tmp.wjTAKSuqSZ + return 0 + diff 
/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1393/e2e-tests/demand-backup-sharded/compare/find1.json /tmp/tmp.nvt7xvr20D/find1 + for i in '$(seq 0 2)' + compare_mongos_cmd find myApp:myPass@some-name-mongos.demand-backup-sharded-18059 2 .svc.cluster.local myApp2 test + local command=find + local uri=myApp:myPass@some-name-mongos.demand-backup-sharded-18059 + local postfix=2 + local suffix=.svc.cluster.local + local database=myApp2 + local collection=test + run_mongos 'use myApp2\n db.test.find()' myApp:myPass@some-name-mongos.demand-backup-sharded-18059 mongodb .svc.cluster.local + egrep -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + local 'command=use myApp2\n db.test.find()' + local uri=myApp:myPass@some-name-mongos.demand-backup-sharded-18059 + local driver=mongodb + local suffix=.svc.cluster.local + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.J9AkZ47h2Q +++ mktemp ++ local LAST_ERR=/tmp/tmp.1jbIxY2quK ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.J9AkZ47h2Q ++ cat /tmp/tmp.1jbIxY2quK ++ rm /tmp/tmp.J9AkZ47h2Q /tmp/tmp.1jbIxY2quK ++ return 0 + local client_container=psmdb-client-7469665986-mlg9m + local mongo_flag= + kubectl_bin exec psmdb-client-7469665986-mlg9m -- bash -c 'printf '\''use myApp2\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.demand-backup-sharded-18059.svc.cluster.local/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.rC4WHTQOwG ++ mktemp + local LAST_ERR=/tmp/tmp.4qabNm3YEt + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-7469665986-mlg9m -- bash -c 'printf '\''use myApp2\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.demand-backup-sharded-18059.svc.cluster.local/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.rC4WHTQOwG + cat /tmp/tmp.4qabNm3YEt + rm /tmp/tmp.rC4WHTQOwG /tmp/tmp.4qabNm3YEt + return 0 + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1393/e2e-tests/demand-backup-sharded/compare/find2.json /tmp/tmp.nvt7xvr20D/find2 + desc 'check backup and restore -- azure-blob' + set +o xtrace ----------------------------------------------------------------------------------- check backup and restore -- azure-blob ----------------------------------------------------------------------------------- ++ get_backup_dest backup-azure-blob ++ local backup_name=backup-azure-blob ++ kubectl_bin get psmdb-backup backup-azure-blob -o 'jsonpath={.status.destination}' ++ sed 's|s3://||' +++ mktemp ++ sed -e 's/.json$//' ++ local LAST_OUT=/tmp/tmp.73J4hZwiFb ++ sed 's|azure://||' +++ mktemp ++ local LAST_ERR=/tmp/tmp.e70TnfyCQu ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb-backup backup-azure-blob -o 'jsonpath={.status.destination}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.73J4hZwiFb ++ cat /tmp/tmp.e70TnfyCQu ++ rm /tmp/tmp.73J4hZwiFb /tmp/tmp.e70TnfyCQu ++ return 0 + 
backup_dest_azure=operator-testing/psmdb-demand-backup-sharded/2024-05-10T10:54:04Z + gunzip + curl -s https://engk8soperators.blob.core.windows.net/operator-testing/psmdb-demand-backup-sharded/2024-05-10T10:54:04Z/rs0/myApp.test.gz + gunzip + curl -s https://engk8soperators.blob.core.windows.net/operator-testing/psmdb-demand-backup-sharded/2024-05-10T10:54:04Z/rs1/myApp1.test.gz + curl -s https://engk8soperators.blob.core.windows.net/operator-testing/psmdb-demand-backup-sharded/2024-05-10T10:54:04Z/rs2/myApp2.test.gz + gunzip + insert_data_mongos 100501 myApp + local data=100501 + local db_name=myApp + local flags= + run_mongos 'use myApp\n db.test.insert({ x: 100501 })' myApp:myPass@some-name-mongos.demand-backup-sharded-18059 '' '' '' + local 'command=use myApp\n db.test.insert({ x: 100501 })' + local uri=myApp:myPass@some-name-mongos.demand-backup-sharded-18059 + local driver=mongodb + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.i5zWt8hbHP +++ mktemp ++ local LAST_ERR=/tmp/tmp.XNHRTewWwA ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.i5zWt8hbHP ++ cat /tmp/tmp.XNHRTewWwA ++ rm /tmp/tmp.i5zWt8hbHP /tmp/tmp.XNHRTewWwA ++ return 0 + local client_container=psmdb-client-7469665986-mlg9m + local mongo_flag= + kubectl_bin exec psmdb-client-7469665986-mlg9m -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100501 })\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.demand-backup-sharded-18059.svc.cluster.local/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.CeYnOenYXp ++ mktemp + local LAST_ERR=/tmp/tmp.HTXxz5ku8E + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-7469665986-mlg9m -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100501 })\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.demand-backup-sharded-18059.svc.cluster.local/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.CeYnOenYXp Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-mongos.demand-backup-sharded-18059.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb Implicit session: session { "id" : UUID("4a80553c-9289-49ed-af31-3148c1ae0716") } Percona Server for MongoDB server version: v7.0.8-5 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.HTXxz5ku8E + rm /tmp/tmp.CeYnOenYXp /tmp/tmp.HTXxz5ku8E + return 0 + insert_data_mongos 100501 myApp1 + local data=100501 + local db_name=myApp1 + local flags= + run_mongos 'use myApp1\n db.test.insert({ x: 100501 })' myApp:myPass@some-name-mongos.demand-backup-sharded-18059 '' '' '' + local 'command=use myApp1\n db.test.insert({ x: 100501 })' + local uri=myApp:myPass@some-name-mongos.demand-backup-sharded-18059 + local driver=mongodb + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.V3ZM3ArB7l +++ mktemp ++ local LAST_ERR=/tmp/tmp.dexQro6JkP ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ 
exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.V3ZM3ArB7l ++ cat /tmp/tmp.dexQro6JkP ++ rm /tmp/tmp.V3ZM3ArB7l /tmp/tmp.dexQro6JkP ++ return 0 + local client_container=psmdb-client-7469665986-mlg9m + local mongo_flag= + kubectl_bin exec psmdb-client-7469665986-mlg9m -- bash -c 'printf '\''use myApp1\n db.test.insert({ x: 100501 })\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.demand-backup-sharded-18059.svc.cluster.local/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.o7FFQ0OkRL ++ mktemp + local LAST_ERR=/tmp/tmp.aZXMcACKW6 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-7469665986-mlg9m -- bash -c 'printf '\''use myApp1\n db.test.insert({ x: 100501 })\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.demand-backup-sharded-18059.svc.cluster.local/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.o7FFQ0OkRL Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-mongos.demand-backup-sharded-18059.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb Implicit session: session { "id" : UUID("5a29bdda-ea2a-416e-93b2-baf449578820") } Percona Server for MongoDB server version: v7.0.8-5 WARNING: shell and server versions do not match switched to db myApp1 WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.aZXMcACKW6 + rm /tmp/tmp.o7FFQ0OkRL /tmp/tmp.aZXMcACKW6 + return 0 + insert_data_mongos 100501 myApp2 + local data=100501 + local db_name=myApp2 + local flags= + run_mongos 'use myApp2\n db.test.insert({ x: 100501 })' myApp:myPass@some-name-mongos.demand-backup-sharded-18059 '' '' '' + local 'command=use myApp2\n db.test.insert({ x: 100501 })' + local uri=myApp:myPass@some-name-mongos.demand-backup-sharded-18059 + local driver=mongodb + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.p3EIqqVOVZ +++ mktemp ++ local LAST_ERR=/tmp/tmp.kc45aQvMWv ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.p3EIqqVOVZ ++ cat /tmp/tmp.kc45aQvMWv ++ rm /tmp/tmp.p3EIqqVOVZ /tmp/tmp.kc45aQvMWv ++ return 0 + local client_container=psmdb-client-7469665986-mlg9m + local mongo_flag= + kubectl_bin exec psmdb-client-7469665986-mlg9m -- bash -c 'printf '\''use myApp2\n db.test.insert({ x: 100501 })\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.demand-backup-sharded-18059.svc.cluster.local/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.DXkfWlo67Q ++ mktemp + local LAST_ERR=/tmp/tmp.NYIZ4ISiWP + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-7469665986-mlg9m -- bash -c 'printf '\''use myApp2\n db.test.insert({ x: 100501 })\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.demand-backup-sharded-18059.svc.cluster.local/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.DXkfWlo67Q Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-mongos.demand-backup-sharded-18059.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb Implicit session: session { "id" : UUID("e0e7e715-4dfc-4cda-bbb2-f2e0b042bad4") } Percona Server for MongoDB server version: v7.0.8-5 WARNING: shell 
and server versions do not match switched to db myApp2 WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.NYIZ4ISiWP + rm /tmp/tmp.DXkfWlo67Q /tmp/tmp.NYIZ4ISiWP + return 0 + check_data -2nd + local postfix=-2nd ++ seq 0 2 + for i in '$(seq 0 2)' + compare_mongos_cmd find myApp:myPass@some-name-mongos.demand-backup-sharded-18059 -2nd .svc.cluster.local myApp test + local command=find + local uri=myApp:myPass@some-name-mongos.demand-backup-sharded-18059 + local postfix=-2nd + local suffix=.svc.cluster.local + local database=myApp + local collection=test + run_mongos 'use myApp\n db.test.find()' myApp:myPass@some-name-mongos.demand-backup-sharded-18059 mongodb .svc.cluster.local + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-mongos.demand-backup-sharded-18059 + local driver=mongodb + local suffix=.svc.cluster.local + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' + egrep -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.04dsSNjkV3 +++ mktemp ++ local LAST_ERR=/tmp/tmp.ZiqHTjVvkr ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.04dsSNjkV3 ++ cat /tmp/tmp.ZiqHTjVvkr ++ rm /tmp/tmp.04dsSNjkV3 /tmp/tmp.ZiqHTjVvkr ++ return 0 + local client_container=psmdb-client-7469665986-mlg9m + local mongo_flag= + kubectl_bin exec psmdb-client-7469665986-mlg9m -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.demand-backup-sharded-18059.svc.cluster.local/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.GfgHLhNZKZ ++ mktemp + local LAST_ERR=/tmp/tmp.9Iu7kkO2X1 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-7469665986-mlg9m -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.demand-backup-sharded-18059.svc.cluster.local/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.GfgHLhNZKZ + cat /tmp/tmp.9Iu7kkO2X1 + rm /tmp/tmp.GfgHLhNZKZ /tmp/tmp.9Iu7kkO2X1 + return 0 + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1393/e2e-tests/demand-backup-sharded/compare/find-2nd.json /tmp/tmp.nvt7xvr20D/find-2nd + for i in '$(seq 0 2)' + compare_mongos_cmd find myApp:myPass@some-name-mongos.demand-backup-sharded-18059 -2nd1 .svc.cluster.local myApp1 test + local command=find + local uri=myApp:myPass@some-name-mongos.demand-backup-sharded-18059 + local postfix=-2nd1 + local suffix=.svc.cluster.local + local database=myApp1 + local collection=test + egrep -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' + run_mongos 'use myApp1\n db.test.find()' myApp:myPass@some-name-mongos.demand-backup-sharded-18059 mongodb .svc.cluster.local + local 'command=use myApp1\n db.test.find()' + local uri=myApp:myPass@some-name-mongos.demand-backup-sharded-18059 + local driver=mongodb + local 
suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.tmPtt3gnKT +++ mktemp ++ local LAST_ERR=/tmp/tmp.K961p0eyzg ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.tmPtt3gnKT ++ cat /tmp/tmp.K961p0eyzg ++ rm /tmp/tmp.tmPtt3gnKT /tmp/tmp.K961p0eyzg ++ return 0 + local client_container=psmdb-client-7469665986-mlg9m + local mongo_flag= + kubectl_bin exec psmdb-client-7469665986-mlg9m -- bash -c 'printf '\''use myApp1\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.demand-backup-sharded-18059.svc.cluster.local/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.9v4eNcVoRS ++ mktemp + local LAST_ERR=/tmp/tmp.iPBHLmYdxJ + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-7469665986-mlg9m -- bash -c 'printf '\''use myApp1\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.demand-backup-sharded-18059.svc.cluster.local/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.9v4eNcVoRS + cat /tmp/tmp.iPBHLmYdxJ + rm /tmp/tmp.9v4eNcVoRS /tmp/tmp.iPBHLmYdxJ + return 0 + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1393/e2e-tests/demand-backup-sharded/compare/find-2nd1.json /tmp/tmp.nvt7xvr20D/find-2nd1 + for i in '$(seq 0 2)' + compare_mongos_cmd find myApp:myPass@some-name-mongos.demand-backup-sharded-18059 -2nd2 .svc.cluster.local myApp2 test + local command=find + local uri=myApp:myPass@some-name-mongos.demand-backup-sharded-18059 + local postfix=-2nd2 + local suffix=.svc.cluster.local + local database=myApp2 + local collection=test + egrep -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' + run_mongos 'use myApp2\n db.test.find()' myApp:myPass@some-name-mongos.demand-backup-sharded-18059 mongodb .svc.cluster.local + local 'command=use myApp2\n db.test.find()' + local uri=myApp:myPass@some-name-mongos.demand-backup-sharded-18059 + local driver=mongodb + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.EtZuh5Btp7 +++ mktemp ++ local LAST_ERR=/tmp/tmp.jKhhwyh7oO ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.EtZuh5Btp7 ++ cat /tmp/tmp.jKhhwyh7oO ++ rm /tmp/tmp.EtZuh5Btp7 /tmp/tmp.jKhhwyh7oO ++ return 0 + local client_container=psmdb-client-7469665986-mlg9m + local mongo_flag= + kubectl_bin exec psmdb-client-7469665986-mlg9m -- bash -c 'printf '\''use myApp2\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.demand-backup-sharded-18059.svc.cluster.local/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.UranRL99ev ++ mktemp + local LAST_ERR=/tmp/tmp.tzWtW0GYkd + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-7469665986-mlg9m -- bash -c 'printf '\''use myApp2\n 
db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.demand-backup-sharded-18059.svc.cluster.local/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.UranRL99ev + cat /tmp/tmp.tzWtW0GYkd + rm /tmp/tmp.UranRL99ev /tmp/tmp.tzWtW0GYkd + return 0 + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1393/e2e-tests/demand-backup-sharded/compare/find-2nd2.json /tmp/tmp.nvt7xvr20D/find-2nd2 + run_restore backup-azure-blob + local backup_name=backup-azure-blob + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1393/e2e-tests/demand-backup-sharded/conf/restore.yml + /usr/bin/sed -e 's/backupName:/backupName: backup-azure-blob/' + kubectl_bin apply -f - + /usr/bin/sed -e 's/name:/name: restore-backup-azure-blob/' ++ mktemp + local LAST_OUT=/tmp/tmp.yx6lehKSKp ++ mktemp + local LAST_ERR=/tmp/tmp.1g16VyQV6w + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.yx6lehKSKp perconaservermongodbrestore.psmdb.percona.com/restore-backup-azure-blob created + cat /tmp/tmp.1g16VyQV6w + rm /tmp/tmp.yx6lehKSKp /tmp/tmp.1g16VyQV6w + return 0 + wait_restore backup-azure-blob some-name + local backup_name=backup-azure-blob + local cluster_name=some-name + local target_state=ready + local wait_cluster_consistency=1 + local wait_time=780 + set +o xtrace waiting psmdb-restore/backup-azure-blob to reach ready state.................2024-05-10T10:55:15.735Z DEBUG Checking for active jobs {"controller": "psmdbrestore-controller", "object": {"name":"restore-backup-aws-s3","namespace":"demand-backup-sharded-18059"}, "namespace": "demand-backup-sharded-18059", "name": "restore-backup-aws-s3", "reconcileID": "810e6687-54ff-45a6-9c44-96fa497d1e7c", "currentJob": {"Name":"restore-backup-aws-s3","Type":1}} 2024-05-10T10:55:19.982Z DEBUG Checking for active jobs {"controller": "psmdbrestore-controller", "object": {"name":"restore-backup-aws-s3","namespace":"demand-backup-sharded-18059"}, "namespace": "demand-backup-sharded-18059", "name": "restore-backup-aws-s3", "reconcileID": "ad47bf22-f0b6-40e8-a803-78325e511ddd", "currentJob": {"Name":"restore-backup-aws-s3","Type":1}} 2024-05-10T10:55:25.168Z DEBUG Checking for active jobs {"controller": "psmdbrestore-controller", "object": {"name":"restore-backup-aws-s3","namespace":"demand-backup-sharded-18059"}, "namespace": "demand-backup-sharded-18059", "name": "restore-backup-aws-s3", "reconcileID": "0e7fc7c5-8d04-4c01-98bc-db59155679be", "currentJob": {"Name":"restore-backup-aws-s3","Type":1}} 2024-05-10T10:55:25.330Z INFO Restore state changed {"controller": "psmdbrestore-controller", "object": {"name":"restore-backup-aws-s3","namespace":"demand-backup-sharded-18059"}, "namespace": "demand-backup-sharded-18059", "name": "restore-backup-aws-s3", "reconcileID": "0e7fc7c5-8d04-4c01-98bc-db59155679be", "previous": "running", "current": "ready"} 2024-05-10T10:55:43.360Z ERROR Reconciler error {"controller": "psmdb-controller", "object": {"name":"some-name","namespace":"demand-backup-sharded-18059"}, "namespace": "demand-backup-sharded-18059", "name": "some-name", "reconcileID": "bf93c20a-c697-488f-95b1-b21f1b4486ce", "error": "reconcile mongos: failed to start balancer: failed to get mongos connection: ping mongo: server selection error: context deadline exceeded, current topology: { Type: Unknown, Servers: [{ Addr: some-name-mongos.demand-backup-sharded-18059.svc.cluster.local:27017, Type: Unknown, Last error: dial 
tcp 10.226.19.253:27017: connect: connection refused }, ] }", "errorVerbose": "server selection error: context deadline exceeded, current topology: { Type: Unknown, Servers: [{ Addr: some-name-mongos.demand-backup-sharded-18059.svc.cluster.local:27017, Type: Unknown, Last error: dial tcp 10.226.19.253:27017: connect: connection refused }, ] }\nping mongo\ngithub.com/percona/percona-server-mongodb-operator/pkg/psmdb/mongo.Dial\n\t/go/src/github.com/percona/percona-server-mongodb-operator/pkg/psmdb/mongo/mongo.go:112\ngithub.com/percona/percona-server-mongodb-operator/pkg/psmdb.MongosClient\n\t/go/src/github.com/percona/percona-server-mongodb-operator/pkg/psmdb/client.go:85\ngithub.com/percona/percona-server-mongodb-operator/pkg/controller/perconaservermongodb.(*mongoClientProvider).Mongos\n\t/go/src/github.com/percona/percona-server-mongodb-operator/pkg/controller/perconaservermongodb/connections.go:47\ngithub.com/percona/percona-server-mongodb-operator/pkg/controller/perconaservermongodb.(*ReconcilePerconaServerMongoDB).mongosClientWithRole\n\t/go/src/github.com/percona/percona-server-mongodb-operator/pkg/controller/perconaservermongodb/connections.go:64\ngithub.com/percona/percona-server-mongodb-operator/pkg/controller/perconaservermongodb.(*ReconcilePerconaServerMongoDB).enableBalancerIfNeeded\n\t/go/src/github.com/percona/percona-server-mongodb-operator/pkg/controller/perconaservermongodb/balancer.go:77\ngithub.com/percona/percona-server-mongodb-operator/pkg/controller/perconaservermongodb.(*ReconcilePerconaServerMongoDB).reconcileMongos\n\t/go/src/github.com/percona/percona-server-mongodb-operator/pkg/controller/perconaservermongodb/psmdb_controller.go:1114\ngithub.com/percona/percona-server-mongodb-operator/pkg/controller/perconaservermongodb.(*ReconcilePerconaServerMongoDB).Reconcile\n\t/go/src/github.com/percona/percona-server-mongodb-operator/pkg/controller/perconaservermongodb/psmdb_controller.go:407\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller).Reconcile\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.18.1/pkg/internal/controller/controller.go:114\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller).reconcileHandler\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.18.1/pkg/internal/controller/controller.go:311\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller).processNextWorkItem\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.18.1/pkg/internal/controller/controller.go:261\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller).Start.func2.2\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.18.1/pkg/internal/controller/controller.go:222\nruntime.goexit\n\t/usr/local/go/src/runtime/asm_amd64.s:1695\nfailed to get mongos 
connection\ngithub.com/percona/percona-server-mongodb-operator/pkg/controller/perconaservermongodb.(*ReconcilePerconaServerMongoDB).enableBalancerIfNeeded\n\t/go/src/github.com/percona/percona-server-mongodb-operator/pkg/controller/perconaservermongodb/balancer.go:79\ngithub.com/percona/percona-server-mongodb-operator/pkg/controller/perconaservermongodb.(*ReconcilePerconaServerMongoDB).reconcileMongos\n\t/go/src/github.com/percona/percona-server-mongodb-operator/pkg/controller/perconaservermongodb/psmdb_controller.go:1114\ngithub.com/percona/percona-server-mongodb-operator/pkg/controller/perconaservermongodb.(*ReconcilePerconaServerMongoDB).Reconcile\n\t/go/src/github.com/percona/percona-server-mongodb-operator/pkg/controller/perconaservermongodb/psmdb_controller.go:407\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller).Reconcile\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.18.1/pkg/internal/controller/controller.go:114\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller).reconcileHandler\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.18.1/pkg/internal/controller/controller.go:311\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller).processNextWorkItem\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.18.1/pkg/internal/controller/controller.go:261\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller).Start.func2.2\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.18.1/pkg/internal/controller/controller.go:222\nruntime.goexit\n\t/usr/local/go/src/runtime/asm_amd64.s:1695\nfailed to start balancer\ngithub.com/percona/percona-server-mongodb-operator/pkg/controller/perconaservermongodb.(*ReconcilePerconaServerMongoDB).reconcileMongos\n\t/go/src/github.com/percona/percona-server-mongodb-operator/pkg/controller/perconaservermongodb/psmdb_controller.go:1115\ngithub.com/percona/percona-server-mongodb-operator/pkg/controller/perconaservermongodb.(*ReconcilePerconaServerMongoDB).Reconcile\n\t/go/src/github.com/percona/percona-server-mongodb-operator/pkg/controller/perconaservermongodb/psmdb_controller.go:407\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller).Reconcile\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.18.1/pkg/internal/controller/controller.go:114\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller).reconcileHandler\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.18.1/pkg/internal/controller/controller.go:311\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller).processNextWorkItem\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.18.1/pkg/internal/controller/controller.go:261\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller).Start.func2.2\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.18.1/pkg/internal/controller/controller.go:222\nruntime.goexit\n\t/usr/local/go/src/runtime/asm_amd64.s:1695\nreconcile 
mongos\ngithub.com/percona/percona-server-mongodb-operator/pkg/controller/perconaservermongodb.(*ReconcilePerconaServerMongoDB).Reconcile\n\t/go/src/github.com/percona/percona-server-mongodb-operator/pkg/controller/perconaservermongodb/psmdb_controller.go:409\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller).Reconcile\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.18.1/pkg/internal/controller/controller.go:114\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller).reconcileHandler\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.18.1/pkg/internal/controller/controller.go:311\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller).processNextWorkItem\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.18.1/pkg/internal/controller/controller.go:261\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller).Start.func2.2\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.18.1/pkg/internal/controller/controller.go:222\nruntime.goexit\n\t/usr/local/go/src/runtime/asm_amd64.s:1695"} sigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller).reconcileHandler /go/pkg/mod/sigs.k8s.io/controller-runtime@v0.18.1/pkg/internal/controller/controller.go:324 sigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller).processNextWorkItem /go/pkg/mod/sigs.k8s.io/controller-runtime@v0.18.1/pkg/internal/controller/controller.go:261 sigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller).Start.func2.2 /go/pkg/mod/sigs.k8s.io/controller-runtime@v0.18.1/pkg/internal/controller/controller.go:222 2024-05-10T10:55:55.642Z ERROR Reconciler error {"controller": "psmdb-controller", "object": {"name":"some-name","namespace":"demand-backup-sharded-18059"}, "namespace": "demand-backup-sharded-18059", "name": "some-name", "reconcileID": "c949907f-409a-4948-ad58-b5e1c24e93b0", "error": "reconcile mongos: failed to start balancer: failed to get mongos connection: ping mongo: server selection error: context deadline exceeded, current topology: { Type: Unknown, Servers: [{ Addr: some-name-mongos.demand-backup-sharded-18059.svc.cluster.local:27017, Type: Unknown, Last error: dial tcp 10.226.19.253:27017: connect: connection refused }, ] }", "errorVerbose": "server selection error: context deadline exceeded, current topology: { Type: Unknown, Servers: [{ Addr: some-name-mongos.demand-backup-sharded-18059.svc.cluster.local:27017, Type: Unknown, Last error: dial tcp 10.226.19.253:27017: connect: connection refused }, ] }\nping 
mongo\ngithub.com/percona/percona-server-mongodb-operator/pkg/psmdb/mongo.Dial\n\t/go/src/github.com/percona/percona-server-mongodb-operator/pkg/psmdb/mongo/mongo.go:112\ngithub.com/percona/percona-server-mongodb-operator/pkg/psmdb.MongosClient\n\t/go/src/github.com/percona/percona-server-mongodb-operator/pkg/psmdb/client.go:85\ngithub.com/percona/percona-server-mongodb-operator/pkg/controller/perconaservermongodb.(*mongoClientProvider).Mongos\n\t/go/src/github.com/percona/percona-server-mongodb-operator/pkg/controller/perconaservermongodb/connections.go:47\ngithub.com/percona/percona-server-mongodb-operator/pkg/controller/perconaservermongodb.(*ReconcilePerconaServerMongoDB).mongosClientWithRole\n\t/go/src/github.com/percona/percona-server-mongodb-operator/pkg/controller/perconaservermongodb/connections.go:64\ngithub.com/percona/percona-server-mongodb-operator/pkg/controller/perconaservermongodb.(*ReconcilePerconaServerMongoDB).enableBalancerIfNeeded\n\t/go/src/github.com/percona/percona-server-mongodb-operator/pkg/controller/perconaservermongodb/balancer.go:77\ngithub.com/percona/percona-server-mongodb-operator/pkg/controller/perconaservermongodb.(*ReconcilePerconaServerMongoDB).reconcileMongos\n\t/go/src/github.com/percona/percona-server-mongodb-operator/pkg/controller/perconaservermongodb/psmdb_controller.go:1114\ngithub.com/percona/percona-server-mongodb-operator/pkg/controller/perconaservermongodb.(*ReconcilePerconaServerMongoDB).Reconcile\n\t/go/src/github.com/percona/percona-server-mongodb-operator/pkg/controller/perconaservermongodb/psmdb_controller.go:407\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller).Reconcile\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.18.1/pkg/internal/controller/controller.go:114\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller).reconcileHandler\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.18.1/pkg/internal/controller/controller.go:311\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller).processNextWorkItem\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.18.1/pkg/internal/controller/controller.go:261\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller).Start.func2.2\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.18.1/pkg/internal/controller/controller.go:222\nruntime.goexit\n\t/usr/local/go/src/runtime/asm_amd64.s:1695\nfailed to get mongos 
connection\ngithub.com/percona/percona-server-mongodb-operator/pkg/controller/perconaservermongodb.(*ReconcilePerconaServerMongoDB).enableBalancerIfNeeded\n\t/go/src/github.com/percona/percona-server-mongodb-operator/pkg/controller/perconaservermongodb/balancer.go:79\ngithub.com/percona/percona-server-mongodb-operator/pkg/controller/perconaservermongodb.(*ReconcilePerconaServerMongoDB).reconcileMongos\n\t/go/src/github.com/percona/percona-server-mongodb-operator/pkg/controller/perconaservermongodb/psmdb_controller.go:1114\ngithub.com/percona/percona-server-mongodb-operator/pkg/controller/perconaservermongodb.(*ReconcilePerconaServerMongoDB).Reconcile\n\t/go/src/github.com/percona/percona-server-mongodb-operator/pkg/controller/perconaservermongodb/psmdb_controller.go:407\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller).Reconcile\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.18.1/pkg/internal/controller/controller.go:114\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller).reconcileHandler\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.18.1/pkg/internal/controller/controller.go:311\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller).processNextWorkItem\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.18.1/pkg/internal/controller/controller.go:261\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller).Start.func2.2\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.18.1/pkg/internal/controller/controller.go:222\nruntime.goexit\n\t/usr/local/go/src/runtime/asm_amd64.s:1695\nfailed to start balancer\ngithub.com/percona/percona-server-mongodb-operator/pkg/controller/perconaservermongodb.(*ReconcilePerconaServerMongoDB).reconcileMongos\n\t/go/src/github.com/percona/percona-server-mongodb-operator/pkg/controller/perconaservermongodb/psmdb_controller.go:1115\ngithub.com/percona/percona-server-mongodb-operator/pkg/controller/perconaservermongodb.(*ReconcilePerconaServerMongoDB).Reconcile\n\t/go/src/github.com/percona/percona-server-mongodb-operator/pkg/controller/perconaservermongodb/psmdb_controller.go:407\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller).Reconcile\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.18.1/pkg/internal/controller/controller.go:114\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller).reconcileHandler\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.18.1/pkg/internal/controller/controller.go:311\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller).processNextWorkItem\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.18.1/pkg/internal/controller/controller.go:261\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller).Start.func2.2\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.18.1/pkg/internal/controller/controller.go:222\nruntime.goexit\n\t/usr/local/go/src/runtime/asm_amd64.s:1695\nreconcile 
mongos\ngithub.com/percona/percona-server-mongodb-operator/pkg/controller/perconaservermongodb.(*ReconcilePerconaServerMongoDB).Reconcile\n\t/go/src/github.com/percona/percona-server-mongodb-operator/pkg/controller/perconaservermongodb/psmdb_controller.go:409\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller).Reconcile\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.18.1/pkg/internal/controller/controller.go:114\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller).reconcileHandler\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.18.1/pkg/internal/controller/controller.go:311\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller).processNextWorkItem\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.18.1/pkg/internal/controller/controller.go:261\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller).Start.func2.2\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.18.1/pkg/internal/controller/controller.go:222\nruntime.goexit\n\t/usr/local/go/src/runtime/asm_amd64.s:1695"} sigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller).reconcileHandler /go/pkg/mod/sigs.k8s.io/controller-runtime@v0.18.1/pkg/internal/controller/controller.go:324 sigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller).processNextWorkItem /go/pkg/mod/sigs.k8s.io/controller-runtime@v0.18.1/pkg/internal/controller/controller.go:261 sigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller).Start.func2.2 /go/pkg/mod/sigs.k8s.io/controller-runtime@v0.18.1/pkg/internal/controller/controller.go:222 2024-05-10T10:56:08.305Z ERROR Reconciler error {"controller": "psmdb-controller", "object": {"name":"some-name","namespace":"demand-backup-sharded-18059"}, "namespace": "demand-backup-sharded-18059", "name": "some-name", "reconcileID": "1822b295-1bab-4d48-b2ae-cd53f32e5d19", "error": "reconcile mongos: failed to start balancer: failed to get mongos connection: ping mongo: server selection error: context deadline exceeded, current topology: { Type: Unknown, Servers: [{ Addr: some-name-mongos.demand-backup-sharded-18059.svc.cluster.local:27017, Type: Unknown, Last error: dial tcp 10.226.19.253:27017: connect: connection refused }, ] }", "errorVerbose": "server selection error: context deadline exceeded, current topology: { Type: Unknown, Servers: [{ Addr: some-name-mongos.demand-backup-sharded-18059.svc.cluster.local:27017, Type: Unknown, Last error: dial tcp 10.226.19.253:27017: connect: connection refused }, ] }\nping 
mongo\ngithub.com/percona/percona-server-mongodb-operator/pkg/psmdb/mongo.Dial\n\t/go/src/github.com/percona/percona-server-mongodb-operator/pkg/psmdb/mongo/mongo.go:112\ngithub.com/percona/percona-server-mongodb-operator/pkg/psmdb.MongosClient\n\t/go/src/github.com/percona/percona-server-mongodb-operator/pkg/psmdb/client.go:85\ngithub.com/percona/percona-server-mongodb-operator/pkg/controller/perconaservermongodb.(*mongoClientProvider).Mongos\n\t/go/src/github.com/percona/percona-server-mongodb-operator/pkg/controller/perconaservermongodb/connections.go:47\ngithub.com/percona/percona-server-mongodb-operator/pkg/controller/perconaservermongodb.(*ReconcilePerconaServerMongoDB).mongosClientWithRole\n\t/go/src/github.com/percona/percona-server-mongodb-operator/pkg/controller/perconaservermongodb/connections.go:64\ngithub.com/percona/percona-server-mongodb-operator/pkg/controller/perconaservermongodb.(*ReconcilePerconaServerMongoDB).enableBalancerIfNeeded\n\t/go/src/github.com/percona/percona-server-mongodb-operator/pkg/controller/perconaservermongodb/balancer.go:77\ngithub.com/percona/percona-server-mongodb-operator/pkg/controller/perconaservermongodb.(*ReconcilePerconaServerMongoDB).reconcileMongos\n\t/go/src/github.com/percona/percona-server-mongodb-operator/pkg/controller/perconaservermongodb/psmdb_controller.go:1114\ngithub.com/percona/percona-server-mongodb-operator/pkg/controller/perconaservermongodb.(*ReconcilePerconaServerMongoDB).Reconcile\n\t/go/src/github.com/percona/percona-server-mongodb-operator/pkg/controller/perconaservermongodb/psmdb_controller.go:407\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller).Reconcile\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.18.1/pkg/internal/controller/controller.go:114\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller).reconcileHandler\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.18.1/pkg/internal/controller/controller.go:311\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller).processNextWorkItem\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.18.1/pkg/internal/controller/controller.go:261\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller).Start.func2.2\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.18.1/pkg/internal/controller/controller.go:222\nruntime.goexit\n\t/usr/local/go/src/runtime/asm_amd64.s:1695\nfailed to get mongos 
connection\ngithub.com/percona/percona-server-mongodb-operator/pkg/controller/perconaservermongodb.(*ReconcilePerconaServerMongoDB).enableBalancerIfNeeded\n\t/go/src/github.com/percona/percona-server-mongodb-operator/pkg/controller/perconaservermongodb/balancer.go:79\ngithub.com/percona/percona-server-mongodb-operator/pkg/controller/perconaservermongodb.(*ReconcilePerconaServerMongoDB).reconcileMongos\n\t/go/src/github.com/percona/percona-server-mongodb-operator/pkg/controller/perconaservermongodb/psmdb_controller.go:1114\ngithub.com/percona/percona-server-mongodb-operator/pkg/controller/perconaservermongodb.(*ReconcilePerconaServerMongoDB).Reconcile\n\t/go/src/github.com/percona/percona-server-mongodb-operator/pkg/controller/perconaservermongodb/psmdb_controller.go:407\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller).Reconcile\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.18.1/pkg/internal/controller/controller.go:114\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller).reconcileHandler\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.18.1/pkg/internal/controller/controller.go:311\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller).processNextWorkItem\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.18.1/pkg/internal/controller/controller.go:261\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller).Start.func2.2\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.18.1/pkg/internal/controller/controller.go:222\nruntime.goexit\n\t/usr/local/go/src/runtime/asm_amd64.s:1695\nfailed to start balancer\ngithub.com/percona/percona-server-mongodb-operator/pkg/controller/perconaservermongodb.(*ReconcilePerconaServerMongoDB).reconcileMongos\n\t/go/src/github.com/percona/percona-server-mongodb-operator/pkg/controller/perconaservermongodb/psmdb_controller.go:1115\ngithub.com/percona/percona-server-mongodb-operator/pkg/controller/perconaservermongodb.(*ReconcilePerconaServerMongoDB).Reconcile\n\t/go/src/github.com/percona/percona-server-mongodb-operator/pkg/controller/perconaservermongodb/psmdb_controller.go:407\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller).Reconcile\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.18.1/pkg/internal/controller/controller.go:114\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller).reconcileHandler\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.18.1/pkg/internal/controller/controller.go:311\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller).processNextWorkItem\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.18.1/pkg/internal/controller/controller.go:261\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller).Start.func2.2\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.18.1/pkg/internal/controller/controller.go:222\nruntime.goexit\n\t/usr/local/go/src/runtime/asm_amd64.s:1695\nreconcile 
mongos\ngithub.com/percona/percona-server-mongodb-operator/pkg/controller/perconaservermongodb.(*ReconcilePerconaServerMongoDB).Reconcile\n\t/go/src/github.com/percona/percona-server-mongodb-operator/pkg/controller/perconaservermongodb/psmdb_controller.go:409\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller).Reconcile\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.18.1/pkg/internal/controller/controller.go:114\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller).reconcileHandler\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.18.1/pkg/internal/controller/controller.go:311\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller).processNextWorkItem\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.18.1/pkg/internal/controller/controller.go:261\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller).Start.func2.2\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.18.1/pkg/internal/controller/controller.go:222\nruntime.goexit\n\t/usr/local/go/src/runtime/asm_amd64.s:1695"} sigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller).reconcileHandler /go/pkg/mod/sigs.k8s.io/controller-runtime@v0.18.1/pkg/internal/controller/controller.go:324 sigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller).processNextWorkItem /go/pkg/mod/sigs.k8s.io/controller-runtime@v0.18.1/pkg/internal/controller/controller.go:261 sigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller).Start.func2.2 /go/pkg/mod/sigs.k8s.io/controller-runtime@v0.18.1/pkg/internal/controller/controller.go:222 2024-05-10T10:56:10.260Z INFO Cluster state changed {"controller": "psmdb-controller", "object": {"name":"some-name","namespace":"demand-backup-sharded-18059"}, "namespace": "demand-backup-sharded-18059", "name": "some-name", "reconcileID": "1559902c-6035-4208-8250-c209a4e77a41", "previous": "error", "current": "initializing"} 2024-05-10T10:56:53.596Z INFO balancer enabled {"controller": "psmdb-controller", "object": {"name":"some-name","namespace":"demand-backup-sharded-18059"}, "namespace": "demand-backup-sharded-18059", "name": "some-name", "reconcileID": "3a84bbd2-c89b-4969-8166-620373077656"} 2024-05-10T10:56:53.707Z INFO Cluster state changed {"controller": "psmdb-controller", "object": {"name":"some-name","namespace":"demand-backup-sharded-18059"}, "namespace": "demand-backup-sharded-18059", "name": "some-name", "reconcileID": "3a84bbd2-c89b-4969-8166-620373077656", "previous": "initializing", "current": "ready"} 2024-05-10T10:57:39.350Z DEBUG Checking for active jobs {"controller": "psmdbrestore-controller", "object": {"name":"restore-backup-gcp-cs","namespace":"demand-backup-sharded-18059"}, "namespace": "demand-backup-sharded-18059", "name": "restore-backup-gcp-cs", "reconcileID": "b1ae548e-cb71-4d19-a745-30fbb7fbcae8", "currentJob": {"Name":"restore-backup-gcp-cs","Type":1}} 2024-05-10T10:57:39.506Z INFO Setting PBM config {"controller": "psmdbrestore-controller", "object": {"name":"restore-backup-gcp-cs","namespace":"demand-backup-sharded-18059"}, "namespace": "demand-backup-sharded-18059", "name": "restore-backup-gcp-cs", "reconcileID": "b1ae548e-cb71-4d19-a745-30fbb7fbcae8", "backup": "some-name"} 2024-05-10T10:57:46.766Z INFO balancer disabled {"controller": "psmdb-controller", "object": {"name":"some-name","namespace":"demand-backup-sharded-18059"}, "namespace": "demand-backup-sharded-18059", "name": "some-name", "reconcileID": "e2642258-ff86-4be4-b74b-01c8ea86676b"} 2024-05-10T10:57:47.236Z INFO Cluster state changed 
{"controller": "psmdb-controller", "object": {"name":"some-name","namespace":"demand-backup-sharded-18059"}, "namespace": "demand-backup-sharded-18059", "name": "some-name", "reconcileID": "e2642258-ff86-4be4-b74b-01c8ea86676b", "previous": "ready", "current": "initializing"} 2024-05-10T10:57:50.515Z INFO Starting restore {"controller": "psmdbrestore-controller", "object": {"name":"restore-backup-gcp-cs","namespace":"demand-backup-sharded-18059"}, "namespace": "demand-backup-sharded-18059", "name": "restore-backup-gcp-cs", "reconcileID": "b1ae548e-cb71-4d19-a745-30fbb7fbcae8", "backup": "2024-05-10T10:53:27Z"} 2024-05-10T10:57:50.515Z INFO Starting logical restore {"controller": "psmdbrestore-controller", "object": {"name":"restore-backup-gcp-cs","namespace":"demand-backup-sharded-18059"}, "namespace": "demand-backup-sharded-18059", "name": "restore-backup-gcp-cs", "reconcileID": "b1ae548e-cb71-4d19-a745-30fbb7fbcae8", "backup": "2024-05-10T10:53:27Z"} 2024-05-10T10:57:50.000+0000 D [resync] got physical restores list: 0 2024-05-10T10:57:50.000+0000 D [resync] got backups list: 4 2024-05-10T10:57:50.000+0000 D [resync] bcp: 2024-05-07T16:32:55Z.pbm.json 2024-05-10T10:57:51.000+0000 D [resync] bcp: 2024-05-08T08:12:46Z.pbm.json 2024-05-10T10:57:51.000+0000 D [resync] bcp: 2024-05-10T10:36:28Z.pbm.json 2024-05-10T10:57:52.000+0000 D [resync] bcp: 2024-05-10T10:53:27Z.pbm.json 2024-05-10T10:57:52.855Z INFO Sending restore command {"controller": "psmdbrestore-controller", "object": {"name":"restore-backup-gcp-cs","namespace":"demand-backup-sharded-18059"}, "namespace": "demand-backup-sharded-18059", "name": "restore-backup-gcp-cs", "reconcileID": "b1ae548e-cb71-4d19-a745-30fbb7fbcae8", "restoreCmd": "name: 2024-05-10T10:57:52.855574306Z, snapshot: 2024-05-10T10:53:27Z"} 2024-05-10T10:57:52.863Z INFO Restore state changed {"controller": "psmdbrestore-controller", "object": {"name":"restore-backup-gcp-cs","namespace":"demand-backup-sharded-18059"}, "namespace": "demand-backup-sharded-18059", "name": "restore-backup-gcp-cs", "reconcileID": "b1ae548e-cb71-4d19-a745-30fbb7fbcae8", "previous": "", "current": "requested"} 2024-05-10T10:57:53.414Z DEBUG Checking for active jobs {"controller": "psmdbrestore-controller", "object": {"name":"restore-backup-gcp-cs","namespace":"demand-backup-sharded-18059"}, "namespace": "demand-backup-sharded-18059", "name": "restore-backup-gcp-cs", "reconcileID": "3ef8a58e-c15d-40be-a33c-a14e5b411155", "currentJob": {"Name":"restore-backup-gcp-cs","Type":1}} 2024-05-10T10:57:53.591Z INFO Restore state changed {"controller": "psmdbrestore-controller", "object": {"name":"restore-backup-gcp-cs","namespace":"demand-backup-sharded-18059"}, "namespace": "demand-backup-sharded-18059", "name": "restore-backup-gcp-cs", "reconcileID": "3ef8a58e-c15d-40be-a33c-a14e5b411155", "previous": "requested", "current": "running"} 2024-05-10T10:57:53.605Z DEBUG Checking for active jobs {"controller": "psmdbrestore-controller", "object": {"name":"restore-backup-gcp-cs","namespace":"demand-backup-sharded-18059"}, "namespace": "demand-backup-sharded-18059", "name": "restore-backup-gcp-cs", "reconcileID": "07188a7c-7265-4d2a-b234-960e937e17af", "currentJob": {"Name":"restore-backup-gcp-cs","Type":1}} 2024-05-10T10:57:58.415Z DEBUG Checking for active jobs {"controller": "psmdbrestore-controller", "object": {"name":"restore-backup-gcp-cs","namespace":"demand-backup-sharded-18059"}, "namespace": "demand-backup-sharded-18059", "name": "restore-backup-gcp-cs", "reconcileID": 
"60e06f9f-35be-453d-85db-d77c07f5be4d", "currentJob": {"Name":"restore-backup-gcp-cs","Type":1}} 2024-05-10T10:58:03.572Z DEBUG Checking for active jobs {"controller": "psmdbrestore-controller", "object": {"name":"restore-backup-gcp-cs","namespace":"demand-backup-sharded-18059"}, "namespace": "demand-backup-sharded-18059", "name": "restore-backup-gcp-cs", "reconcileID": "33a487d2-ef98-4f8a-8228-25b9b1df1211", "currentJob": {"Name":"restore-backup-gcp-cs","Type":1}} 2024-05-10T10:58:03.730Z INFO Restore state changed {"controller": "psmdbrestore-controller", "object": {"name":"restore-backup-gcp-cs","namespace":"demand-backup-sharded-18059"}, "namespace": "demand-backup-sharded-18059", "name": "restore-backup-gcp-cs", "reconcileID": "33a487d2-ef98-4f8a-8228-25b9b1df1211", "previous": "running", "current": "ready"} 2024-05-10T10:58:18.804Z ERROR Reconciler error {"controller": "psmdb-controller", "object": {"name":"some-name","namespace":"demand-backup-sharded-18059"}, "namespace": "demand-backup-sharded-18059", "name": "some-name", "reconcileID": "f11e5807-d768-41b8-bbfe-01b31dd00586", "error": "reconcile mongos: failed to start balancer: failed to get mongos connection: ping mongo: server selection error: context deadline exceeded, current topology: { Type: Unknown, Servers: [{ Addr: some-name-mongos.demand-backup-sharded-18059.svc.cluster.local:27017, Type: Unknown, Last error: dial tcp 10.226.19.253:27017: connect: connection refused }, ] }", "errorVerbose": "server selection error: context deadline exceeded, current topology: { Type: Unknown, Servers: [{ Addr: some-name-mongos.demand-backup-sharded-18059.svc.cluster.local:27017, Type: Unknown, Last error: dial tcp 10.226.19.253:27017: connect: connection refused }, ] }\nping mongo\ngithub.com/percona/percona-server-mongodb-operator/pkg/psmdb/mongo.Dial\n\t/go/src/github.com/percona/percona-server-mongodb-operator/pkg/psmdb/mongo/mongo.go:112\ngithub.com/percona/percona-server-mongodb-operator/pkg/psmdb.MongosClient\n\t/go/src/github.com/percona/percona-server-mongodb-operator/pkg/psmdb/client.go:85\ngithub.com/percona/percona-server-mongodb-operator/pkg/controller/perconaservermongodb.(*mongoClientProvider).Mongos\n\t/go/src/github.com/percona/percona-server-mongodb-operator/pkg/controller/perconaservermongodb/connections.go:47\ngithub.com/percona/percona-server-mongodb-operator/pkg/controller/perconaservermongodb.(*ReconcilePerconaServerMongoDB).mongosClientWithRole\n\t/go/src/github.com/percona/percona-server-mongodb-operator/pkg/controller/perconaservermongodb/connections.go:64\ngithub.com/percona/percona-server-mongodb-operator/pkg/controller/perconaservermongodb.(*ReconcilePerconaServerMongoDB).enableBalancerIfNeeded\n\t/go/src/github.com/percona/percona-server-mongodb-operator/pkg/controller/perconaservermongodb/balancer.go:77\ngithub.com/percona/percona-server-mongodb-operator/pkg/controller/perconaservermongodb.(*ReconcilePerconaServerMongoDB).reconcileMongos\n\t/go/src/github.com/percona/percona-server-mongodb-operator/pkg/controller/perconaservermongodb/psmdb_controller.go:1114\ngithub.com/percona/percona-server-mongodb-operator/pkg/controller/perconaservermongodb.(*ReconcilePerconaServerMongoDB).Reconcile\n\t/go/src/github.com/percona/percona-server-mongodb-operator/pkg/controller/perconaservermongodb/psmdb_controller.go:407\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller).Reconcile\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.18.1/pkg/internal/controller/controller.go:114\nsigs.k8s.io/controller-runti
me/pkg/internal/controller.(*Controller).reconcileHandler\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.18.1/pkg/internal/controller/controller.go:311\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller).processNextWorkItem\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.18.1/pkg/internal/controller/controller.go:261\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller).Start.func2.2\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.18.1/pkg/internal/controller/controller.go:222\nruntime.goexit\n\t/usr/local/go/src/runtime/asm_amd64.s:1695\nfailed to get mongos connection\ngithub.com/percona/percona-server-mongodb-operator/pkg/controller/perconaservermongodb.(*ReconcilePerconaServerMongoDB).enableBalancerIfNeeded\n\t/go/src/github.com/percona/percona-server-mongodb-operator/pkg/controller/perconaservermongodb/balancer.go:79\ngithub.com/percona/percona-server-mongodb-operator/pkg/controller/perconaservermongodb.(*ReconcilePerconaServerMongoDB).reconcileMongos\n\t/go/src/github.com/percona/percona-server-mongodb-operator/pkg/controller/perconaservermongodb/psmdb_controller.go:1114\ngithub.com/percona/percona-server-mongodb-operator/pkg/controller/perconaservermongodb.(*ReconcilePerconaServerMongoDB).Reconcile\n\t/go/src/github.com/percona/percona-server-mongodb-operator/pkg/controller/perconaservermongodb/psmdb_controller.go:407\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller).Reconcile\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.18.1/pkg/internal/controller/controller.go:114\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller).reconcileHandler\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.18.1/pkg/internal/controller/controller.go:311\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller).processNextWorkItem\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.18.1/pkg/internal/controller/controller.go:261\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller).Start.func2.2\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.18.1/pkg/internal/controller/controller.go:222\nruntime.goexit\n\t/usr/local/go/src/runtime/asm_amd64.s:1695\nfailed to start balancer\ngithub.com/percona/percona-server-mongodb-operator/pkg/controller/perconaservermongodb.(*ReconcilePerconaServerMongoDB).reconcileMongos\n\t/go/src/github.com/percona/percona-server-mongodb-operator/pkg/controller/perconaservermongodb/psmdb_controller.go:1115\ngithub.com/percona/percona-server-mongodb-operator/pkg/controller/perconaservermongodb.(*ReconcilePerconaServerMongoDB).Reconcile\n\t/go/src/github.com/percona/percona-server-mongodb-operator/pkg/controller/perconaservermongodb/psmdb_controller.go:407\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller).Reconcile\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.18.1/pkg/internal/controller/controller.go:114\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller).reconcileHandler\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.18.1/pkg/internal/controller/controller.go:311\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller).processNextWorkItem\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.18.1/pkg/internal/controller/controller.go:261\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller).Start.func2.2\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.18.1/pkg/internal/controller/controller.go:222\nruntime.goexit\n\t/usr/local/go/src/runtime/asm_amd64.s:1695\nreconcile 
mongos\ngithub.com/percona/percona-server-mongodb-operator/pkg/controller/perconaservermongodb.(*ReconcilePerconaServerMongoDB).Reconcile\n\t/go/src/github.com/percona/percona-server-mongodb-operator/pkg/controller/perconaservermongodb/psmdb_controller.go:409\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller).Reconcile\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.18.1/pkg/internal/controller/controller.go:114\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller).reconcileHandler\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.18.1/pkg/internal/controller/controller.go:311\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller).processNextWorkItem\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.18.1/pkg/internal/controller/controller.go:261\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller).Start.func2.2\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.18.1/pkg/internal/controller/controller.go:222\nruntime.goexit\n\t/usr/local/go/src/runtime/asm_amd64.s:1695"} sigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller).reconcileHandler /go/pkg/mod/sigs.k8s.io/controller-runtime@v0.18.1/pkg/internal/controller/controller.go:324 sigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller).processNextWorkItem /go/pkg/mod/sigs.k8s.io/controller-runtime@v0.18.1/pkg/internal/controller/controller.go:261 sigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller).Start.func2.2 /go/pkg/mod/sigs.k8s.io/controller-runtime@v0.18.1/pkg/internal/controller/controller.go:222 2024-05-10T10:58:31.773Z ERROR Reconciler error {"controller": "psmdb-controller", "object": {"name":"some-name","namespace":"demand-backup-sharded-18059"}, "namespace": "demand-backup-sharded-18059", "name": "some-name", "reconcileID": "7725dde1-1924-45ba-9520-82c4dd89d655", "error": "reconcile mongos: failed to start balancer: failed to get mongos connection: ping mongo: server selection error: context deadline exceeded, current topology: { Type: Unknown, Servers: [{ Addr: some-name-mongos.demand-backup-sharded-18059.svc.cluster.local:27017, Type: Unknown, Last error: dial tcp 10.226.19.253:27017: connect: connection refused }, ] }", "errorVerbose": "server selection error: context deadline exceeded, current topology: { Type: Unknown, Servers: [{ Addr: some-name-mongos.demand-backup-sharded-18059.svc.cluster.local:27017, Type: Unknown, Last error: dial tcp 10.226.19.253:27017: connect: connection refused }, ] }\nping 
mongo\ngithub.com/percona/percona-server-mongodb-operator/pkg/psmdb/mongo.Dial\n\t/go/src/github.com/percona/percona-server-mongodb-operator/pkg/psmdb/mongo/mongo.go:112\ngithub.com/percona/percona-server-mongodb-operator/pkg/psmdb.MongosClient\n\t/go/src/github.com/percona/percona-server-mongodb-operator/pkg/psmdb/client.go:85\ngithub.com/percona/percona-server-mongodb-operator/pkg/controller/perconaservermongodb.(*mongoClientProvider).Mongos\n\t/go/src/github.com/percona/percona-server-mongodb-operator/pkg/controller/perconaservermongodb/connections.go:47\ngithub.com/percona/percona-server-mongodb-operator/pkg/controller/perconaservermongodb.(*ReconcilePerconaServerMongoDB).mongosClientWithRole\n\t/go/src/github.com/percona/percona-server-mongodb-operator/pkg/controller/perconaservermongodb/connections.go:64\ngithub.com/percona/percona-server-mongodb-operator/pkg/controller/perconaservermongodb.(*ReconcilePerconaServerMongoDB).enableBalancerIfNeeded\n\t/go/src/github.com/percona/percona-server-mongodb-operator/pkg/controller/perconaservermongodb/balancer.go:77\ngithub.com/percona/percona-server-mongodb-operator/pkg/controller/perconaservermongodb.(*ReconcilePerconaServerMongoDB).reconcileMongos\n\t/go/src/github.com/percona/percona-server-mongodb-operator/pkg/controller/perconaservermongodb/psmdb_controller.go:1114\ngithub.com/percona/percona-server-mongodb-operator/pkg/controller/perconaservermongodb.(*ReconcilePerconaServerMongoDB).Reconcile\n\t/go/src/github.com/percona/percona-server-mongodb-operator/pkg/controller/perconaservermongodb/psmdb_controller.go:407\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller).Reconcile\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.18.1/pkg/internal/controller/controller.go:114\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller).reconcileHandler\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.18.1/pkg/internal/controller/controller.go:311\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller).processNextWorkItem\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.18.1/pkg/internal/controller/controller.go:261\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller).Start.func2.2\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.18.1/pkg/internal/controller/controller.go:222\nruntime.goexit\n\t/usr/local/go/src/runtime/asm_amd64.s:1695\nfailed to get mongos 
connection\ngithub.com/percona/percona-server-mongodb-operator/pkg/controller/perconaservermongodb.(*ReconcilePerconaServerMongoDB).enableBalancerIfNeeded\n\t/go/src/github.com/percona/percona-server-mongodb-operator/pkg/controller/perconaservermongodb/balancer.go:79\ngithub.com/percona/percona-server-mongodb-operator/pkg/controller/perconaservermongodb.(*ReconcilePerconaServerMongoDB).reconcileMongos\n\t/go/src/github.com/percona/percona-server-mongodb-operator/pkg/controller/perconaservermongodb/psmdb_controller.go:1114\ngithub.com/percona/percona-server-mongodb-operator/pkg/controller/perconaservermongodb.(*ReconcilePerconaServerMongoDB).Reconcile\n\t/go/src/github.com/percona/percona-server-mongodb-operator/pkg/controller/perconaservermongodb/psmdb_controller.go:407\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller).Reconcile\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.18.1/pkg/internal/controller/controller.go:114\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller).reconcileHandler\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.18.1/pkg/internal/controller/controller.go:311\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller).processNextWorkItem\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.18.1/pkg/internal/controller/controller.go:261\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller).Start.func2.2\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.18.1/pkg/internal/controller/controller.go:222\nruntime.goexit\n\t/usr/local/go/src/runtime/asm_amd64.s:1695\nfailed to start balancer\ngithub.com/percona/percona-server-mongodb-operator/pkg/controller/perconaservermongodb.(*ReconcilePerconaServerMongoDB).reconcileMongos\n\t/go/src/github.com/percona/percona-server-mongodb-operator/pkg/controller/perconaservermongodb/psmdb_controller.go:1115\ngithub.com/percona/percona-server-mongodb-operator/pkg/controller/perconaservermongodb.(*ReconcilePerconaServerMongoDB).Reconcile\n\t/go/src/github.com/percona/percona-server-mongodb-operator/pkg/controller/perconaservermongodb/psmdb_controller.go:407\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller).Reconcile\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.18.1/pkg/internal/controller/controller.go:114\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller).reconcileHandler\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.18.1/pkg/internal/controller/controller.go:311\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller).processNextWorkItem\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.18.1/pkg/internal/controller/controller.go:261\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller).Start.func2.2\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.18.1/pkg/internal/controller/controller.go:222\nruntime.goexit\n\t/usr/local/go/src/runtime/asm_amd64.s:1695\nreconcile 
mongos\ngithub.com/percona/percona-server-mongodb-operator/pkg/controller/perconaservermongodb.(*ReconcilePerconaServerMongoDB).Reconcile\n\t/go/src/github.com/percona/percona-server-mongodb-operator/pkg/controller/perconaservermongodb/psmdb_controller.go:409\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller).Reconcile\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.18.1/pkg/internal/controller/controller.go:114\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller).reconcileHandler\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.18.1/pkg/internal/controller/controller.go:311\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller).processNextWorkItem\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.18.1/pkg/internal/controller/controller.go:261\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller).Start.func2.2\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.18.1/pkg/internal/controller/controller.go:222\nruntime.goexit\n\t/usr/local/go/src/runtime/asm_amd64.s:1695"} sigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller).reconcileHandler /go/pkg/mod/sigs.k8s.io/controller-runtime@v0.18.1/pkg/internal/controller/controller.go:324 sigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller).processNextWorkItem /go/pkg/mod/sigs.k8s.io/controller-runtime@v0.18.1/pkg/internal/controller/controller.go:261 sigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller).Start.func2.2 /go/pkg/mod/sigs.k8s.io/controller-runtime@v0.18.1/pkg/internal/controller/controller.go:222 2024-05-10T10:58:43.773Z ERROR Reconciler error {"controller": "psmdb-controller", "object": {"name":"some-name","namespace":"demand-backup-sharded-18059"}, "namespace": "demand-backup-sharded-18059", "name": "some-name", "reconcileID": "f20e50db-1e0d-4593-a556-35ecc95051e4", "error": "reconcile mongos: failed to start balancer: failed to get mongos connection: ping mongo: server selection error: context deadline exceeded, current topology: { Type: Unknown, Servers: [{ Addr: some-name-mongos.demand-backup-sharded-18059.svc.cluster.local:27017, Type: Unknown, Last error: dial tcp 10.226.19.253:27017: connect: connection refused }, ] }", "errorVerbose": "server selection error: context deadline exceeded, current topology: { Type: Unknown, Servers: [{ Addr: some-name-mongos.demand-backup-sharded-18059.svc.cluster.local:27017, Type: Unknown, Last error: dial tcp 10.226.19.253:27017: connect: connection refused }, ] }\nping 
mongo\ngithub.com/percona/percona-server-mongodb-operator/pkg/psmdb/mongo.Dial\n\t/go/src/github.com/percona/percona-server-mongodb-operator/pkg/psmdb/mongo/mongo.go:112\ngithub.com/percona/percona-server-mongodb-operator/pkg/psmdb.MongosClient\n\t/go/src/github.com/percona/percona-server-mongodb-operator/pkg/psmdb/client.go:85\ngithub.com/percona/percona-server-mongodb-operator/pkg/controller/perconaservermongodb.(*mongoClientProvider).Mongos\n\t/go/src/github.com/percona/percona-server-mongodb-operator/pkg/controller/perconaservermongodb/connections.go:47\ngithub.com/percona/percona-server-mongodb-operator/pkg/controller/perconaservermongodb.(*ReconcilePerconaServerMongoDB).mongosClientWithRole\n\t/go/src/github.com/percona/percona-server-mongodb-operator/pkg/controller/perconaservermongodb/connections.go:64\ngithub.com/percona/percona-server-mongodb-operator/pkg/controller/perconaservermongodb.(*ReconcilePerconaServerMongoDB).enableBalancerIfNeeded\n\t/go/src/github.com/percona/percona-server-mongodb-operator/pkg/controller/perconaservermongodb/balancer.go:77\ngithub.com/percona/percona-server-mongodb-operator/pkg/controller/perconaservermongodb.(*ReconcilePerconaServerMongoDB).reconcileMongos\n\t/go/src/github.com/percona/percona-server-mongodb-operator/pkg/controller/perconaservermongodb/psmdb_controller.go:1114\ngithub.com/percona/percona-server-mongodb-operator/pkg/controller/perconaservermongodb.(*ReconcilePerconaServerMongoDB).Reconcile\n\t/go/src/github.com/percona/percona-server-mongodb-operator/pkg/controller/perconaservermongodb/psmdb_controller.go:407\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller).Reconcile\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.18.1/pkg/internal/controller/controller.go:114\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller).reconcileHandler\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.18.1/pkg/internal/controller/controller.go:311\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller).processNextWorkItem\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.18.1/pkg/internal/controller/controller.go:261\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller).Start.func2.2\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.18.1/pkg/internal/controller/controller.go:222\nruntime.goexit\n\t/usr/local/go/src/runtime/asm_amd64.s:1695\nfailed to get mongos 
connection\ngithub.com/percona/percona-server-mongodb-operator/pkg/controller/perconaservermongodb.(*ReconcilePerconaServerMongoDB).enableBalancerIfNeeded\n\t/go/src/github.com/percona/percona-server-mongodb-operator/pkg/controller/perconaservermongodb/balancer.go:79\ngithub.com/percona/percona-server-mongodb-operator/pkg/controller/perconaservermongodb.(*ReconcilePerconaServerMongoDB).reconcileMongos\n\t/go/src/github.com/percona/percona-server-mongodb-operator/pkg/controller/perconaservermongodb/psmdb_controller.go:1114\ngithub.com/percona/percona-server-mongodb-operator/pkg/controller/perconaservermongodb.(*ReconcilePerconaServerMongoDB).Reconcile\n\t/go/src/github.com/percona/percona-server-mongodb-operator/pkg/controller/perconaservermongodb/psmdb_controller.go:407\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller).Reconcile\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.18.1/pkg/internal/controller/controller.go:114\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller).reconcileHandler\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.18.1/pkg/internal/controller/controller.go:311\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller).processNextWorkItem\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.18.1/pkg/internal/controller/controller.go:261\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller).Start.func2.2\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.18.1/pkg/internal/controller/controller.go:222\nruntime.goexit\n\t/usr/local/go/src/runtime/asm_amd64.s:1695\nfailed to start balancer\ngithub.com/percona/percona-server-mongodb-operator/pkg/controller/perconaservermongodb.(*ReconcilePerconaServerMongoDB).reconcileMongos\n\t/go/src/github.com/percona/percona-server-mongodb-operator/pkg/controller/perconaservermongodb/psmdb_controller.go:1115\ngithub.com/percona/percona-server-mongodb-operator/pkg/controller/perconaservermongodb.(*ReconcilePerconaServerMongoDB).Reconcile\n\t/go/src/github.com/percona/percona-server-mongodb-operator/pkg/controller/perconaservermongodb/psmdb_controller.go:407\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller).Reconcile\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.18.1/pkg/internal/controller/controller.go:114\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller).reconcileHandler\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.18.1/pkg/internal/controller/controller.go:311\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller).processNextWorkItem\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.18.1/pkg/internal/controller/controller.go:261\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller).Start.func2.2\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.18.1/pkg/internal/controller/controller.go:222\nruntime.goexit\n\t/usr/local/go/src/runtime/asm_amd64.s:1695\nreconcile 
mongos\ngithub.com/percona/percona-server-mongodb-operator/pkg/controller/perconaservermongodb.(*ReconcilePerconaServerMongoDB).Reconcile\n\t/go/src/github.com/percona/percona-server-mongodb-operator/pkg/controller/perconaservermongodb/psmdb_controller.go:409\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller).Reconcile\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.18.1/pkg/internal/controller/controller.go:114\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller).reconcileHandler\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.18.1/pkg/internal/controller/controller.go:311\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller).processNextWorkItem\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.18.1/pkg/internal/controller/controller.go:261\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller).Start.func2.2\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.18.1/pkg/internal/controller/controller.go:222\nruntime.goexit\n\t/usr/local/go/src/runtime/asm_amd64.s:1695"} sigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller).reconcileHandler /go/pkg/mod/sigs.k8s.io/controller-runtime@v0.18.1/pkg/internal/controller/controller.go:324 sigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller).processNextWorkItem /go/pkg/mod/sigs.k8s.io/controller-runtime@v0.18.1/pkg/internal/controller/controller.go:261 sigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller).Start.func2.2 /go/pkg/mod/sigs.k8s.io/controller-runtime@v0.18.1/pkg/internal/controller/controller.go:222 2024-05-10T10:58:56.047Z ERROR Reconciler error {"controller": "psmdb-controller", "object": {"name":"some-name","namespace":"demand-backup-sharded-18059"}, "namespace": "demand-backup-sharded-18059", "name": "some-name", "reconcileID": "014b14f7-876b-4688-9dbc-d84644e289e9", "error": "reconcile mongos: failed to start balancer: failed to get mongos connection: ping mongo: server selection error: context deadline exceeded, current topology: { Type: Unknown, Servers: [{ Addr: some-name-mongos.demand-backup-sharded-18059.svc.cluster.local:27017, Type: Unknown, Last error: dial tcp 10.226.19.253:27017: connect: connection refused }, ] }", "errorVerbose": "server selection error: context deadline exceeded, current topology: { Type: Unknown, Servers: [{ Addr: some-name-mongos.demand-backup-sharded-18059.svc.cluster.local:27017, Type: Unknown, Last error: dial tcp 10.226.19.253:27017: connect: connection refused }, ] }\nping 
mongo\ngithub.com/percona/percona-server-mongodb-operator/pkg/psmdb/mongo.Dial\n\t/go/src/github.com/percona/percona-server-mongodb-operator/pkg/psmdb/mongo/mongo.go:112\ngithub.com/percona/percona-server-mongodb-operator/pkg/psmdb.MongosClient\n\t/go/src/github.com/percona/percona-server-mongodb-operator/pkg/psmdb/client.go:85\ngithub.com/percona/percona-server-mongodb-operator/pkg/controller/perconaservermongodb.(*mongoClientProvider).Mongos\n\t/go/src/github.com/percona/percona-server-mongodb-operator/pkg/controller/perconaservermongodb/connections.go:47\ngithub.com/percona/percona-server-mongodb-operator/pkg/controller/perconaservermongodb.(*ReconcilePerconaServerMongoDB).mongosClientWithRole\n\t/go/src/github.com/percona/percona-server-mongodb-operator/pkg/controller/perconaservermongodb/connections.go:64\ngithub.com/percona/percona-server-mongodb-operator/pkg/controller/perconaservermongodb.(*ReconcilePerconaServerMongoDB).enableBalancerIfNeeded\n\t/go/src/github.com/percona/percona-server-mongodb-operator/pkg/controller/perconaservermongodb/balancer.go:77\ngithub.com/percona/percona-server-mongodb-operator/pkg/controller/perconaservermongodb.(*ReconcilePerconaServerMongoDB).reconcileMongos\n\t/go/src/github.com/percona/percona-server-mongodb-operator/pkg/controller/perconaservermongodb/psmdb_controller.go:1114\ngithub.com/percona/percona-server-mongodb-operator/pkg/controller/perconaservermongodb.(*ReconcilePerconaServerMongoDB).Reconcile\n\t/go/src/github.com/percona/percona-server-mongodb-operator/pkg/controller/perconaservermongodb/psmdb_controller.go:407\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller).Reconcile\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.18.1/pkg/internal/controller/controller.go:114\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller).reconcileHandler\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.18.1/pkg/internal/controller/controller.go:311\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller).processNextWorkItem\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.18.1/pkg/internal/controller/controller.go:261\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller).Start.func2.2\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.18.1/pkg/internal/controller/controller.go:222\nruntime.goexit\n\t/usr/local/go/src/runtime/asm_amd64.s:1695\nfailed to get mongos 
connection\ngithub.com/percona/percona-server-mongodb-operator/pkg/controller/perconaservermongodb.(*ReconcilePerconaServerMongoDB).enableBalancerIfNeeded\n\t/go/src/github.com/percona/percona-server-mongodb-operator/pkg/controller/perconaservermongodb/balancer.go:79\ngithub.com/percona/percona-server-mongodb-operator/pkg/controller/perconaservermongodb.(*ReconcilePerconaServerMongoDB).reconcileMongos\n\t/go/src/github.com/percona/percona-server-mongodb-operator/pkg/controller/perconaservermongodb/psmdb_controller.go:1114\ngithub.com/percona/percona-server-mongodb-operator/pkg/controller/perconaservermongodb.(*ReconcilePerconaServerMongoDB).Reconcile\n\t/go/src/github.com/percona/percona-server-mongodb-operator/pkg/controller/perconaservermongodb/psmdb_controller.go:407\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller).Reconcile\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.18.1/pkg/internal/controller/controller.go:114\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller).reconcileHandler\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.18.1/pkg/internal/controller/controller.go:311\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller).processNextWorkItem\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.18.1/pkg/internal/controller/controller.go:261\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller).Start.func2.2\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.18.1/pkg/internal/controller/controller.go:222\nruntime.goexit\n\t/usr/local/go/src/runtime/asm_amd64.s:1695\nfailed to start balancer\ngithub.com/percona/percona-server-mongodb-operator/pkg/controller/perconaservermongodb.(*ReconcilePerconaServerMongoDB).reconcileMongos\n\t/go/src/github.com/percona/percona-server-mongodb-operator/pkg/controller/perconaservermongodb/psmdb_controller.go:1115\ngithub.com/percona/percona-server-mongodb-operator/pkg/controller/perconaservermongodb.(*ReconcilePerconaServerMongoDB).Reconcile\n\t/go/src/github.com/percona/percona-server-mongodb-operator/pkg/controller/perconaservermongodb/psmdb_controller.go:407\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller).Reconcile\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.18.1/pkg/internal/controller/controller.go:114\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller).reconcileHandler\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.18.1/pkg/internal/controller/controller.go:311\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller).processNextWorkItem\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.18.1/pkg/internal/controller/controller.go:261\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller).Start.func2.2\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.18.1/pkg/internal/controller/controller.go:222\nruntime.goexit\n\t/usr/local/go/src/runtime/asm_amd64.s:1695\nreconcile 
mongos\ngithub.com/percona/percona-server-mongodb-operator/pkg/controller/perconaservermongodb.(*ReconcilePerconaServerMongoDB).Reconcile\n\t/go/src/github.com/percona/percona-server-mongodb-operator/pkg/controller/perconaservermongodb/psmdb_controller.go:409\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller).Reconcile\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.18.1/pkg/internal/controller/controller.go:114\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller).reconcileHandler\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.18.1/pkg/internal/controller/controller.go:311\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller).processNextWorkItem\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.18.1/pkg/internal/controller/controller.go:261\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller).Start.func2.2\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.18.1/pkg/internal/controller/controller.go:222\nruntime.goexit\n\t/usr/local/go/src/runtime/asm_amd64.s:1695"} sigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller).reconcileHandler /go/pkg/mod/sigs.k8s.io/controller-runtime@v0.18.1/pkg/internal/controller/controller.go:324 sigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller).processNextWorkItem /go/pkg/mod/sigs.k8s.io/controller-runtime@v0.18.1/pkg/internal/controller/controller.go:261 sigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller).Start.func2.2 /go/pkg/mod/sigs.k8s.io/controller-runtime@v0.18.1/pkg/internal/controller/controller.go:222 2024-05-10T10:58:58.513Z INFO Cluster state changed {"controller": "psmdb-controller", "object": {"name":"some-name","namespace":"demand-backup-sharded-18059"}, "namespace": "demand-backup-sharded-18059", "name": "some-name", "reconcileID": "68266bf5-ff64-4490-8230-4e57b7fc58be", "previous": "error", "current": "initializing"} 2024-05-10T10:59:32.576Z INFO Cluster state changed {"controller": "psmdb-controller", "object": {"name":"some-name","namespace":"demand-backup-sharded-18059"}, "namespace": "demand-backup-sharded-18059", "name": "some-name", "reconcileID": "8684d9df-908c-46f4-8ed0-670ac1449d63", "previous": "initializing", "current": "ready"} 2024-05-10T11:00:12.781Z DEBUG Checking for active jobs {"controller": "psmdbrestore-controller", "object": {"name":"restore-backup-azure-blob","namespace":"demand-backup-sharded-18059"}, "namespace": "demand-backup-sharded-18059", "name": "restore-backup-azure-blob", "reconcileID": "9e49e91e-591f-4e77-82d9-53cef2308582", "currentJob": {"Name":"restore-backup-azure-blob","Type":1}} 2024-05-10T11:00:12.950Z INFO Setting PBM config {"controller": "psmdbrestore-controller", "object": {"name":"restore-backup-azure-blob","namespace":"demand-backup-sharded-18059"}, "namespace": "demand-backup-sharded-18059", "name": "restore-backup-azure-blob", "reconcileID": "9e49e91e-591f-4e77-82d9-53cef2308582", "backup": "some-name"} 2024-05-10T11:00:18.125Z INFO balancer disabled {"controller": "psmdb-controller", "object": {"name":"some-name","namespace":"demand-backup-sharded-18059"}, "namespace": "demand-backup-sharded-18059", "name": "some-name", "reconcileID": "22e5f3a0-8a64-40a5-a0ee-d9bdc74dc4a3"} 2024-05-10T11:00:18.666Z INFO Cluster state changed {"controller": "psmdb-controller", "object": {"name":"some-name","namespace":"demand-backup-sharded-18059"}, "namespace": "demand-backup-sharded-18059", "name": "some-name", "reconcileID": "22e5f3a0-8a64-40a5-a0ee-d9bdc74dc4a3", "previous": "ready", 
"current": "initializing"} 2024-05-10T11:00:23.960Z INFO Starting restore {"controller": "psmdbrestore-controller", "object": {"name":"restore-backup-azure-blob","namespace":"demand-backup-sharded-18059"}, "namespace": "demand-backup-sharded-18059", "name": "restore-backup-azure-blob", "reconcileID": "9e49e91e-591f-4e77-82d9-53cef2308582", "backup": "2024-05-10T10:54:04Z"} 2024-05-10T11:00:23.960Z INFO Starting logical restore {"controller": "psmdbrestore-controller", "object": {"name":"restore-backup-azure-blob","namespace":"demand-backup-sharded-18059"}, "namespace": "demand-backup-sharded-18059", "name": "restore-backup-azure-blob", "reconcileID": "9e49e91e-591f-4e77-82d9-53cef2308582", "backup": "2024-05-10T10:54:04Z"} 2024-05-10T11:00:24.000+0000 D [resync] got physical restores list: 0 2024-05-10T11:00:25.000+0000 D [resync] got backups list: 3 2024-05-10T11:00:25.000+0000 D [resync] bcp: 2024-05-07T16:33:38Z.pbm.json 2024-05-10T11:00:25.000+0000 D [resync] bcp: 2024-05-08T08:13:29Z.pbm.json 2024-05-10T11:00:26.000+0000 D [resync] bcp: 2024-05-10T10:54:04Z.pbm.json 2024-05-10T11:00:26.446Z INFO Sending restore command {"controller": "psmdbrestore-controller", "object": {"name":"restore-backup-azure-blob","namespace":"demand-backup-sharded-18059"}, "namespace": "demand-backup-sharded-18059", "name": "restore-backup-azure-blob", "reconcileID": "9e49e91e-591f-4e77-82d9-53cef2308582", "restoreCmd": "name: 2024-05-10T11:00:26.446266288Z, snapshot: 2024-05-10T10:54:04Z"} 2024-05-10T11:00:26.457Z INFO Restore state changed {"controller": "psmdbrestore-controller", "object": {"name":"restore-backup-azure-blob","namespace":"demand-backup-sharded-18059"}, "namespace": "demand-backup-sharded-18059", "name": "restore-backup-azure-blob", "reconcileID": "9e49e91e-591f-4e77-82d9-53cef2308582", "previous": "", "current": "requested"} 2024-05-10T11:00:26.469Z DEBUG Checking for active jobs {"controller": "psmdbrestore-controller", "object": {"name":"restore-backup-azure-blob","namespace":"demand-backup-sharded-18059"}, "namespace": "demand-backup-sharded-18059", "name": "restore-backup-azure-blob", "reconcileID": "42e55ee6-24da-486a-9eed-b473c7e055ff", "currentJob": {"Name":"restore-backup-azure-blob","Type":1}} 2024-05-10T11:00:26.671Z INFO Waiting for restore metadata {"controller": "psmdbrestore-controller", "object": {"name":"restore-backup-azure-blob","namespace":"demand-backup-sharded-18059"}, "namespace": "demand-backup-sharded-18059", "name": "restore-backup-azure-blob", "reconcileID": "42e55ee6-24da-486a-9eed-b473c7e055ff", "pbmName": "2024-05-10T11:00:26.446266288Z", "restore": "restore-backup-azure-blob", "backup": "backup-azure-blob"} 2024-05-10T11:00:31.469Z DEBUG Checking for active jobs {"controller": "psmdbrestore-controller", "object": {"name":"restore-backup-azure-blob","namespace":"demand-backup-sharded-18059"}, "namespace": "demand-backup-sharded-18059", "name": "restore-backup-azure-blob", "reconcileID": "b1c1be4e-3e6a-4af1-98a9-9b3b37b73131", "currentJob": {"Name":"restore-backup-azure-blob","Type":1}} 2024-05-10T11:00:31.673Z INFO Restore state changed {"controller": "psmdbrestore-controller", "object": {"name":"restore-backup-azure-blob","namespace":"demand-backup-sharded-18059"}, "namespace": "demand-backup-sharded-18059", "name": "restore-backup-azure-blob", "reconcileID": "b1c1be4e-3e6a-4af1-98a9-9b3b37b73131", "previous": "requested", "current": "running"} 2024-05-10T11:00:31.690Z DEBUG Checking for active jobs {"controller": "psmdbrestore-controller", "object": 
{"name":"restore-backup-azure-blob","namespace":"demand-backup-sharded-18059"}, "namespace": "demand-backup-sharded-18059", "name": "restore-backup-azure-blob", "reconcileID": "a80e4350-489c-4130-86d0-7029665fe0c2", "currentJob": {"Name":"restore-backup-azure-blob","Type":1}} 2024-05-10T11:00:36.691Z DEBUG Checking for active jobs {"controller": "psmdbrestore-controller", "object": {"name":"restore-backup-azure-blob","namespace":"demand-backup-sharded-18059"}, "namespace": "demand-backup-sharded-18059", "name": "restore-backup-azure-blob", "reconcileID": "2841e8ce-4b3e-4200-bdae-b5b147524b55", "currentJob": {"Name":"restore-backup-azure-blob","Type":1}} 2024-05-10T11:00:41.855Z DEBUG Checking for active jobs {"controller": "psmdbrestore-controller", "object": {"name":"restore-backup-azure-blob","namespace":"demand-backup-sharded-18059"}, "namespace": "demand-backup-sharded-18059", "name": "restore-backup-azure-blob", "reconcileID": "01309224-e58b-4cce-af08-83493628cae8", "currentJob": {"Name":"restore-backup-azure-blob","Type":1}} 2024-05-10T11:00:47.025Z DEBUG Checking for active jobs {"controller": "psmdbrestore-controller", "object": {"name":"restore-backup-azure-blob","namespace":"demand-backup-sharded-18059"}, "namespace": "demand-backup-sharded-18059", "name": "restore-backup-azure-blob", "reconcileID": "0efb04d3-741a-4ab2-bafb-503ad5dbd407", "currentJob": {"Name":"restore-backup-azure-blob","Type":1}} 2024-05-10T11:00:47.195Z INFO Restore state changed {"controller": "psmdbrestore-controller", "object": {"name":"restore-backup-azure-blob","namespace":"demand-backup-sharded-18059"}, "namespace": "demand-backup-sharded-18059", "name": "restore-backup-azure-blob", "reconcileID": "0efb04d3-741a-4ab2-bafb-503ad5dbd407", "previous": "running", "current": "error"} apiVersion: psmdb.percona.com/v1 kind: PerconaServerMongoDBRestore metadata: annotations: kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"psmdb.percona.com/v1","kind":"PerconaServerMongoDBRestore","metadata":{"annotations":{},"name":"restore-backup-azure-blob","namespace":"demand-backup-sharded-18059"},"spec":{"backupName":"backup-azure-blob","clusterName":"some-name"}} creationTimestamp: "2024-05-10T11:00:12Z" generation: 1 name: restore-backup-azure-blob namespace: demand-backup-sharded-18059 resourceVersion: "26751" uid: 264af8b3-7348-40e5-a666-7ff9945219de spec: backupName: backup-azure-blob clusterName: some-name status: error: 'waiting for running: cluster failed: waiting for running: cluster failed: couldn''t get response from all shards: convergeClusterWithTimeout: reached converge timeout' pbmName: "2024-05-10T11:00:26.446266288Z" state: error Restore object restore-backup-azure-blob is in error state. something went wrong with operator or kubernetes cluster