Log: /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1932/e2e-tests/logs/balancer.log WARNING: version difference between client (1.33) and server (1.30) exceeds the supported minor version skew of +/-1 WARNING: version difference between client (1.33) and server (1.30) exceeds the supported minor version skew of +/-1 WARNING: version difference between client (1.33) and server (1.30) exceeds the supported minor version skew of +/-1 + main + create_infra balancer-13428 + local ns=balancer-13428 + [[ 1 == 1 ]] + delete_crd + desc 'get and delete old CRDs and RBAC' + set +o xtrace ----------------------------------------------------------------------------------- get and delete old CRDs and RBAC ----------------------------------------------------------------------------------- + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1932/deploy/crd.yaml --ignore-not-found --wait=false ++ mktemp + local LAST_OUT=/tmp/tmp.eYLHnhhXFM ++ mktemp + local LAST_ERR=/tmp/tmp.z06PMrqjuo + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1932/deploy/crd.yaml --ignore-not-found --wait=false + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.eYLHnhhXFM customresourcedefinition.apiextensions.k8s.io "perconaservermongodbbackups.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbrestores.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbs.psmdb.percona.com" deleted + cat /tmp/tmp.z06PMrqjuo + rm /tmp/tmp.eYLHnhhXFM /tmp/tmp.z06PMrqjuo + return 0 ++ grep -v '\-\-\-' ++ yq eval .metadata.name /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1932/deploy/crd.yaml + for crd_name in '$(yq eval '\''.metadata.name'\'' "${src_dir}/deploy/crd.yaml" | grep -v '\''\-\-\-'\'')' + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbbackups.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' + kubectl get perconaservermongodbbackups.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE No resources found + kubectl patch perconaservermongodbbackups.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbbackups" + : + kubectl_bin wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.3Rm7PQz1ak ++ mktemp + local LAST_ERR=/tmp/tmp.NLaIqY36YL + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.3Rm7PQz1ak + cat /tmp/tmp.NLaIqY36YL + rm /tmp/tmp.3Rm7PQz1ak /tmp/tmp.NLaIqY36YL + return 0 + for crd_name in '$(yq eval '\''.metadata.name'\'' "${src_dir}/deploy/crd.yaml" | grep -v '\''\-\-\-'\'')' + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbrestores.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' + kubectl get perconaservermongodbrestores.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE error: the server doesn't have a resource type "perconaservermongodbrestores" + kubectl patch perconaservermongodbrestores.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbrestores" + : + kubectl_bin wait --for=delete crd 
perconaservermongodbrestores.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.6RhGgxhtLS ++ mktemp + local LAST_ERR=/tmp/tmp.NonY6OOVjh + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.6RhGgxhtLS + cat /tmp/tmp.NonY6OOVjh + rm /tmp/tmp.6RhGgxhtLS /tmp/tmp.NonY6OOVjh + return 0 + for crd_name in '$(yq eval '\''.metadata.name'\'' "${src_dir}/deploy/crd.yaml" | grep -v '\''\-\-\-'\'')' + grep -v NAMESPACE + kubectl get perconaservermongodbs.psmdb.percona.com --all-namespaces -o wide + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbs.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbs" + kubectl patch perconaservermongodbs.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbs" + : + kubectl_bin wait --for=delete crd perconaservermongodbs.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.irHN2nCSxr ++ mktemp + local LAST_ERR=/tmp/tmp.H7WEKXnnBw + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=delete crd perconaservermongodbs.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.irHN2nCSxr + cat /tmp/tmp.H7WEKXnnBw + rm /tmp/tmp.irHN2nCSxr /tmp/tmp.H7WEKXnnBw + return 0 + local rbac_yaml=rbac.yaml + '[' -n psmdb-operator ']' + rbac_yaml=cw-rbac.yaml + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1932/deploy/cw-rbac.yaml --ignore-not-found ++ mktemp + local LAST_OUT=/tmp/tmp.0ZNtV3sAdi ++ mktemp + local LAST_ERR=/tmp/tmp.Bz9OLpeJZZ + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1932/deploy/cw-rbac.yaml --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.0ZNtV3sAdi clusterrole.rbac.authorization.k8s.io "percona-server-mongodb-operator" deleted clusterrolebinding.rbac.authorization.k8s.io "service-account-percona-server-mongodb-operator" deleted + cat /tmp/tmp.Bz9OLpeJZZ + rm /tmp/tmp.0ZNtV3sAdi /tmp/tmp.Bz9OLpeJZZ + return 0 + check_crd_for_deletion PR-1932-138a34ea + local git_tag=PR-1932-138a34ea ++ curl -s https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/PR-1932-138a34ea/deploy/crd.yaml ++ /usr/bin/sed s/---//g ++ yq eval .metadata.name ++ /usr/bin/sed ':a;N;$!ba;s/\n/ /g' + for crd_name in '$(curl -s https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/${git_tag}/deploy/crd.yaml | yq eval '\''.metadata.name'\'' | $sed '\''s/---//g'\'' | $sed '\'':a;N;$!ba;s/\n/ /g'\'')' ++ kubectl_bin get crd/null -o 'jsonpath={.status.conditions[-1].type}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.bxXmMhpqSI +++ mktemp ++ local LAST_ERR=/tmp/tmp.3yjc5Buih3 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}' ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.bxXmMhpqSI ++ cat /tmp/tmp.3yjc5Buih3 Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ sleep 0 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}' ++ 
exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.bxXmMhpqSI ++ cat /tmp/tmp.3yjc5Buih3 Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ sleep 4 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}' ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.bxXmMhpqSI ++ cat /tmp/tmp.3yjc5Buih3 Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ sleep 8 ++ cat /tmp/tmp.bxXmMhpqSI ++ cat /tmp/tmp.3yjc5Buih3 Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ rm /tmp/tmp.bxXmMhpqSI /tmp/tmp.3yjc5Buih3 ++ return 1 + [[ '' == \T\e\r\m\i\n\a\t\i\n\g ]] + '[' -n psmdb-operator ']' + create_namespace psmdb-operator + local namespace=psmdb-operator + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + destroy_chaos_mesh ++ helm list --all-namespaces --filter chaos-mesh ++ sed s/NAMESPACE// ++ awk '-F ' '{print $2}' ++ tail -n1 + local chaos_mesh_ns= + desc 'destroy chaos-mesh' + set +o xtrace ----------------------------------------------------------------------------------- destroy chaos-mesh ----------------------------------------------------------------------------------- + '[' -n '' ']' ++ kubectl get MutatingWebhookConfiguration ++ awk '{print $1}' ++ grep chaos-mesh + timeout 30 kubectl delete MutatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep validate-auth ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl api-resources ++ grep chaos-mesh ++ awk '{print $1}' ++ kubectl get crd ++ awk '{print $1}' ++ grep chaos-mesh.org + timeout 30 kubectl delete crd error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrolebinding ++ awk '{print $1}' ++ grep chaos-mesh + timeout 30 kubectl delete clusterrolebinding error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrole ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrole error: resource(s) were provided, but no name was specified + : + desc 'cleaned up all old namespaces' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up all old namespaces ----------------------------------------------------------------------------------- + egrep -v '^kube-|^default|Terminating|psmdb-operator|openshift|gke-mcs|^NAME' + '[' -n '' ']' + desc 'cleaned up old namespaces psmdb-operator' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up old namespaces psmdb-operator ----------------------------------------------------------------------------------- + kubectl_bin delete namespace psmdb-operator --ignore-not-found ++ mktemp + kubectl_bin get ns ++ mktemp + local LAST_OUT=/tmp/tmp.L3aB6r4YHA ++ mktemp + local LAST_OUT=/tmp/tmp.2TxSvoACGW + local LAST_ERR=/tmp/tmp.QSuMxpYlWG + local exit_status=0 + local timeout=4 ++ mktemp + local LAST_ERR=/tmp/tmp.HLZfVfKX6b + local exit_status=0 + local timeout=4 ++ seq 0 2 ++ seq 0 2 + for i in '$(seq 0 2)' + set 
+e + kubectl get ns + for i in '$(seq 0 2)' + set +e + kubectl delete namespace psmdb-operator --ignore-not-found + xargs kubectl delete ns + awk '{print$1}' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.L3aB6r4YHA + cat /tmp/tmp.QSuMxpYlWG + rm /tmp/tmp.L3aB6r4YHA /tmp/tmp.QSuMxpYlWG + return 0 namespace "balancer-1330" deleted namespace "gke-managed-cim" deleted namespace "gke-managed-system" deleted namespace "gmp-public" deleted namespace "gmp-system" deleted + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.2TxSvoACGW namespace "psmdb-operator" deleted + cat /tmp/tmp.HLZfVfKX6b + rm /tmp/tmp.2TxSvoACGW /tmp/tmp.HLZfVfKX6b + return 0 + kubectl_bin wait --for=delete namespace psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.K9VCh3tyM7 ++ mktemp + local LAST_ERR=/tmp/tmp.fsi6QwThji + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=delete namespace psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.K9VCh3tyM7 + cat /tmp/tmp.fsi6QwThji + rm /tmp/tmp.K9VCh3tyM7 /tmp/tmp.fsi6QwThji + return 0 + desc 'create namespace psmdb-operator' + set +o xtrace ----------------------------------------------------------------------------------- create namespace psmdb-operator ----------------------------------------------------------------------------------- + kubectl_bin create namespace psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.ojaAsCo4LV ++ mktemp + local LAST_ERR=/tmp/tmp.1NMezUCCUG + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl create namespace psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.ojaAsCo4LV namespace/psmdb-operator created + cat /tmp/tmp.1NMezUCCUG + rm /tmp/tmp.ojaAsCo4LV /tmp/tmp.1NMezUCCUG + return 0 ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.591kmXQEfa +++ mktemp ++ local LAST_ERR=/tmp/tmp.uKpIDpL5y4 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.591kmXQEfa ++ cat /tmp/tmp.uKpIDpL5y4 ++ rm /tmp/tmp.591kmXQEfa /tmp/tmp.uKpIDpL5y4 ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-1932-138a34ea-1-cluster2 --namespace=psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.YFhji2Kx93 ++ mktemp + local LAST_ERR=/tmp/tmp.QT5ARGejld + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-1932-138a34ea-1-cluster2 --namespace=psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.YFhji2Kx93 Context "gke_cloud-dev-112233_us-central1-a_jen-psmdb-1932-138a34ea-1-cluster2" modified. 
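
The create_infra steps up to this point drop the old CRDs, clear leftover test namespaces, and rebuild a clean psmdb-operator namespace before pointing kubectl at it. Condensed into plain commands (all taken from this run; paths are relative to the operator repo and the GKE context name is specific to this cluster), the sequence is roughly:

    # recreate the operator namespace and make it the current kubectl context
    kubectl delete -f deploy/crd.yaml --ignore-not-found --wait=false
    kubectl delete namespace psmdb-operator --ignore-not-found
    kubectl wait --for=delete namespace psmdb-operator
    kubectl create namespace psmdb-operator
    kubectl config set-context "$(kubectl config current-context)" --namespace=psmdb-operator
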
+ cat /tmp/tmp.QT5ARGejld + rm /tmp/tmp.YFhji2Kx93 /tmp/tmp.QT5ARGejld + return 0 + deploy_operator + desc 'start PSMDB operator' + set +o xtrace ----------------------------------------------------------------------------------- start PSMDB operator ----------------------------------------------------------------------------------- + local cr_file + '[' -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1932/e2e-tests/balancer/conf/crd.yaml ']' + cr_file=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1932/deploy/crd.yaml + kubectl_bin apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1932/deploy/crd.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.6L9mJE33kn ++ mktemp + local LAST_ERR=/tmp/tmp.YsIvUXw9oO + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1932/deploy/crd.yaml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.6L9mJE33kn customresourcedefinition.apiextensions.k8s.io/perconaservermongodbbackups.psmdb.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaservermongodbrestores.psmdb.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaservermongodbs.psmdb.percona.com serverside-applied + cat /tmp/tmp.YsIvUXw9oO + rm /tmp/tmp.6L9mJE33kn /tmp/tmp.YsIvUXw9oO + return 0 + '[' -n psmdb-operator ']' + apply_rbac cw-rbac + local operator_namespace=psmdb-operator + local rbac=cw-rbac + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1932/deploy/cw-rbac.yaml + sed -e 's^namespace: .*^namespace: psmdb-operator^' + kubectl_bin apply -n psmdb-operator -f - ++ mktemp + local LAST_OUT=/tmp/tmp.57QO2f2JwH ++ mktemp + local LAST_ERR=/tmp/tmp.cUTjfUXNU1 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -n psmdb-operator -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.57QO2f2JwH clusterrole.rbac.authorization.k8s.io/percona-server-mongodb-operator created serviceaccount/percona-server-mongodb-operator created clusterrolebinding.rbac.authorization.k8s.io/service-account-percona-server-mongodb-operator created + cat /tmp/tmp.cUTjfUXNU1 + rm /tmp/tmp.57QO2f2JwH /tmp/tmp.cUTjfUXNU1 + return 0 + kubectl_bin apply -f - + yq eval ' (.spec.template.spec.containers[].image = "perconalab/percona-server-mongodb-operator:PR-1932-138a34ea") | ((.. | select(.[] == "DISABLE_TELEMETRY")) |= .value="true") | ((.. 
| select(.[] == "LOG_LEVEL")) |= .value="DEBUG")' /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1932/deploy/cw-operator.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.8wOmu9PujN ++ mktemp + local LAST_ERR=/tmp/tmp.rJqleOvYC8 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.8wOmu9PujN deployment.apps/percona-server-mongodb-operator created + cat /tmp/tmp.rJqleOvYC8 + rm /tmp/tmp.8wOmu9PujN /tmp/tmp.rJqleOvYC8 + return 0 + sleep 2 ++ get_operator_pod ++ kubectl_bin get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.gBiIwaeIC8 +++ mktemp ++ local LAST_ERR=/tmp/tmp.Zm1RXDzKHv ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.gBiIwaeIC8 ++ cat /tmp/tmp.Zm1RXDzKHv ++ rm /tmp/tmp.gBiIwaeIC8 /tmp/tmp.Zm1RXDzKHv ++ return 0 + wait_pod percona-server-mongodb-operator-78748cc64c-gdjcm + local pod=percona-server-mongodb-operator-78748cc64c-gdjcm + set +o xtrace waiting for pod/percona-server-mongodb-operator-78748cc64c-gdjcm to be ready.OK + create_namespace balancer-13428 + local namespace=balancer-13428 + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + destroy_chaos_mesh ++ helm list --all-namespaces --filter chaos-mesh ++ tail -n1 ++ awk '-F ' '{print $2}' ++ sed s/NAMESPACE// + local chaos_mesh_ns= + desc 'destroy chaos-mesh' + set +o xtrace ----------------------------------------------------------------------------------- destroy chaos-mesh ----------------------------------------------------------------------------------- + '[' -n '' ']' ++ awk '{print $1}' ++ grep chaos-mesh ++ kubectl get MutatingWebhookConfiguration + timeout 30 kubectl delete MutatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep validate-auth ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ grep chaos-mesh ++ kubectl api-resources ++ awk '{print $1}' ++ kubectl get crd ++ grep chaos-mesh.org ++ awk '{print $1}' + timeout 30 kubectl delete crd error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrolebinding ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrolebinding error: resource(s) were provided, but no name was specified + : ++ grep chaos-mesh ++ awk '{print $1}' ++ kubectl get clusterrole + timeout 30 kubectl delete clusterrole error: resource(s) were provided, but no name was specified + : + desc 'cleaned up all old namespaces' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up all old namespaces ----------------------------------------------------------------------------------- + '[' -n '' ']' + desc 'cleaned up old namespaces balancer-13428' + set +o xtrace 
----------------------------------------------------------------------------------- cleaned up old namespaces balancer-13428 ----------------------------------------------------------------------------------- + kubectl_bin delete namespace balancer-13428 --ignore-not-found ++ mktemp + awk '{print$1}' + xargs kubectl delete ns + kubectl_bin get ns + egrep -v '^kube-|^default|Terminating|psmdb-operator|openshift|gke-mcs|^NAME' ++ mktemp + local LAST_OUT=/tmp/tmp.OsB3QD0Ejf + local LAST_OUT=/tmp/tmp.BJjIDIbwTz ++ mktemp ++ mktemp + local LAST_ERR=/tmp/tmp.1KbmSV4Ss6 + local exit_status=0 + local timeout=4 + local LAST_ERR=/tmp/tmp.KJVoFI6mcn + local exit_status=0 + local timeout=4 ++ seq 0 2 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete namespace balancer-13428 --ignore-not-found + for i in '$(seq 0 2)' + set +e + kubectl get ns + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.BJjIDIbwTz + cat /tmp/tmp.1KbmSV4Ss6 + rm /tmp/tmp.BJjIDIbwTz /tmp/tmp.1KbmSV4Ss6 + return 0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.OsB3QD0Ejf + cat /tmp/tmp.KJVoFI6mcn + rm /tmp/tmp.OsB3QD0Ejf /tmp/tmp.KJVoFI6mcn + return 0 + kubectl_bin wait --for=delete namespace balancer-13428 ++ mktemp + local LAST_OUT=/tmp/tmp.Gm4Ur98x9p ++ mktemp + local LAST_ERR=/tmp/tmp.P96HgFuCMg + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=delete namespace balancer-13428 namespace "gke-managed-cim" deleted + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Gm4Ur98x9p + cat /tmp/tmp.P96HgFuCMg + rm /tmp/tmp.Gm4Ur98x9p /tmp/tmp.P96HgFuCMg + return 0 + desc 'create namespace balancer-13428' + set +o xtrace ----------------------------------------------------------------------------------- create namespace balancer-13428 ----------------------------------------------------------------------------------- + kubectl_bin create namespace balancer-13428 ++ mktemp + local LAST_OUT=/tmp/tmp.NbvSN1Z2E2 ++ mktemp + local LAST_ERR=/tmp/tmp.iEjeJ8lvz2 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl create namespace balancer-13428 namespace "gke-managed-system" deleted namespace "gmp-public" deleted namespace "gmp-system" deleted + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.NbvSN1Z2E2 namespace/balancer-13428 created + cat /tmp/tmp.iEjeJ8lvz2 + rm /tmp/tmp.NbvSN1Z2E2 /tmp/tmp.iEjeJ8lvz2 + return 0 ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.Kb7da1JWLG +++ mktemp ++ local LAST_ERR=/tmp/tmp.p5zYwBFCUo ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Kb7da1JWLG ++ cat /tmp/tmp.p5zYwBFCUo ++ rm /tmp/tmp.Kb7da1JWLG /tmp/tmp.p5zYwBFCUo ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-1932-138a34ea-1-cluster2 --namespace=balancer-13428 ++ mktemp + local LAST_OUT=/tmp/tmp.dvP1S81mx7 ++ mktemp + local LAST_ERR=/tmp/tmp.2lIEUOstTq + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-1932-138a34ea-1-cluster2 --namespace=balancer-13428 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.dvP1S81mx7 Context "gke_cloud-dev-112233_us-central1-a_jen-psmdb-1932-138a34ea-1-cluster2" modified. 
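
The deploy_operator call earlier in this block is three applies: the CRDs (server-side, with --force-conflicts), the cluster-wide RBAC with its namespace rewritten to psmdb-operator, and the operator Deployment with its image and environment pinned by yq. A minimal sketch of those steps, assuming the current context is already set to psmdb-operator and paths are relative to the repo:

    kubectl apply --server-side --force-conflicts -f deploy/crd.yaml
    sed -e 's^namespace: .*^namespace: psmdb-operator^' deploy/cw-rbac.yaml | kubectl apply -n psmdb-operator -f -
    # pin the PR build image and force DEBUG logging / disabled telemetry, as this run does
    yq eval '(.spec.template.spec.containers[].image = "perconalab/percona-server-mongodb-operator:PR-1932-138a34ea") |
             ((.. | select(.[] == "DISABLE_TELEMETRY")) |= .value="true") |
             ((.. | select(.[] == "LOG_LEVEL")) |= .value="DEBUG")' deploy/cw-operator.yaml | kubectl apply -f -
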
+ cat /tmp/tmp.2lIEUOstTq + rm /tmp/tmp.dvP1S81mx7 /tmp/tmp.2lIEUOstTq + return 0 + deploy_minio + desc 'install Minio' + set +o xtrace ----------------------------------------------------------------------------------- install Minio ----------------------------------------------------------------------------------- + helm uninstall minio-service Error: uninstall: Release not loaded: minio-service: release: not found + : + helm repo remove minio "minio" has been removed from your repositories + helm repo add minio https://charts.min.io/ "minio" has been added to your repositories + retry 10 60 helm install minio-service --version 5.0.14 --set replicas=1 --set mode=standalone --set resources.requests.memory=256Mi --set rootUser=rootuser --set rootPassword=rootpass123 --set 'users[0].accessKey=some-access-key' --set 'users[0].secretKey=some-secret-key' --set 'users[0].policy=consoleAdmin' --set service.type=ClusterIP --set configPathmc=/tmp/.minio/ --set persistence.size=2G --set securityContext.enabled=false minio/minio + local max=10 + local delay=60 + shift 2 + local n=1 + helm install minio-service --version 5.0.14 --set replicas=1 --set mode=standalone --set resources.requests.memory=256Mi --set rootUser=rootuser --set rootPassword=rootpass123 --set 'users[0].accessKey=some-access-key' --set 'users[0].secretKey=some-secret-key' --set 'users[0].policy=consoleAdmin' --set service.type=ClusterIP --set configPathmc=/tmp/.minio/ --set persistence.size=2G --set securityContext.enabled=false minio/minio NAME: minio-service LAST DEPLOYED: Mon May 19 02:38:59 2025 NAMESPACE: balancer-13428 STATUS: deployed REVISION: 1 TEST SUITE: None NOTES: MinIO can be accessed via port 9000 on the following DNS name from within your cluster: minio-service.balancer-13428.svc.cluster.local To access MinIO from localhost, run the below commands: 1. export POD_NAME=$(kubectl get pods --namespace balancer-13428 -l "release=minio-service" -o jsonpath="{.items[0].metadata.name}") 2. kubectl port-forward $POD_NAME 9000 --namespace balancer-13428 Read more about port forwarding here: http://kubernetes.io/docs/user-guide/kubectl/kubectl_port-forward/ You can now access MinIO server on http://localhost:9000. Follow the below steps to connect to MinIO server with mc client: 1. Download the MinIO mc client - https://min.io/docs/minio/linux/reference/minio-mc.html#quickstart 2. export MC_HOST_minio-service-local=http://$(kubectl get secret --namespace balancer-13428 minio-service -o jsonpath="{.data.rootUser}" | base64 --decode):$(kubectl get secret --namespace balancer-13428 minio-service -o jsonpath="{.data.rootPassword}" | base64 --decode)@localhost:9000 3. 
mc ls minio-service-local ++ kubectl_bin get pods --selector=release=minio-service -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.P0Wq1mMVQN +++ mktemp ++ local LAST_ERR=/tmp/tmp.zL8XvFBPX0 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=release=minio-service -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.P0Wq1mMVQN ++ cat /tmp/tmp.zL8XvFBPX0 ++ rm /tmp/tmp.P0Wq1mMVQN /tmp/tmp.zL8XvFBPX0 ++ return 0 + MINIO_POD=minio-service-8967c7f7f-r2gpj + wait_pod minio-service-8967c7f7f-r2gpj + local pod=minio-service-8967c7f7f-r2gpj + set +o xtrace waiting for pod/minio-service-8967c7f7f-r2gpj to be ready.OK + '[' -n psmdb-operator ']' + kubectl_bin create svc -n psmdb-operator externalname minio-service --external-name=minio-service.balancer-13428.svc.cluster.local --tcp=9000 ++ mktemp + local LAST_OUT=/tmp/tmp.lYWSRsOiXh ++ mktemp + local LAST_ERR=/tmp/tmp.uZm7EuGIcf + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl create svc -n psmdb-operator externalname minio-service --external-name=minio-service.balancer-13428.svc.cluster.local --tcp=9000 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.lYWSRsOiXh service/minio-service created + cat /tmp/tmp.uZm7EuGIcf + rm /tmp/tmp.lYWSRsOiXh /tmp/tmp.uZm7EuGIcf + return 0 + create_minio_bucket operator-testing + local bucket=operator-testing + kubectl_bin run -i --rm aws-cli --image=perconalab/awscli --restart=Never -- bash -c 'AWS_ACCESS_KEY_ID=some-access-key AWS_SECRET_ACCESS_KEY=some-secret-key AWS_DEFAULT_REGION=us-east-1 /usr/bin/aws --endpoint-url http://minio-service:9000 s3 mb s3://operator-testing' ++ mktemp + local LAST_OUT=/tmp/tmp.rF2WkdG5O1 ++ mktemp + local LAST_ERR=/tmp/tmp.5X5wlit7Vx + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl run -i --rm aws-cli --image=perconalab/awscli --restart=Never -- bash -c 'AWS_ACCESS_KEY_ID=some-access-key AWS_SECRET_ACCESS_KEY=some-secret-key AWS_DEFAULT_REGION=us-east-1 /usr/bin/aws --endpoint-url http://minio-service:9000 s3 mb s3://operator-testing' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.rF2WkdG5O1 pod "aws-cli" deleted + cat /tmp/tmp.5X5wlit7Vx If you don't see a command prompt, try pressing enter. 
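
MinIO is installed with helm into the test namespace and the operator-testing bucket is then created from a one-shot aws-cli pod. The same bucket step is sketched below, with an extra listing appended to confirm the bucket exists; the trailing `s3 ls` is an illustrative addition, not part of the original test:

    kubectl run -i --rm aws-cli --image=perconalab/awscli --restart=Never -- bash -c \
      'AWS_ACCESS_KEY_ID=some-access-key AWS_SECRET_ACCESS_KEY=some-secret-key AWS_DEFAULT_REGION=us-east-1 \
       /usr/bin/aws --endpoint-url http://minio-service:9000 s3 mb s3://operator-testing && \
       /usr/bin/aws --endpoint-url http://minio-service:9000 s3 ls'
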
+ rm /tmp/tmp.rF2WkdG5O1 /tmp/tmp.5X5wlit7Vx + return 0 + apply_s3_storage_secrets + desc 'create secrets for cloud storages' + set +o xtrace ----------------------------------------------------------------------------------- create secrets for cloud storages ----------------------------------------------------------------------------------- + '[' -z '' ']' + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1932/e2e-tests/conf/minio-secret.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1932/e2e-tests/conf/cloud-secret.yml ++ mktemp + local LAST_OUT=/tmp/tmp.g4dBbcwzPu ++ mktemp + local LAST_ERR=/tmp/tmp.wd5UhusAfQ + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1932/e2e-tests/conf/minio-secret.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1932/e2e-tests/conf/cloud-secret.yml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.g4dBbcwzPu secret/minio-secret created secret/aws-s3-secret created secret/gcp-cs-secret created secret/azure-secret created + cat /tmp/tmp.wd5UhusAfQ + rm /tmp/tmp.g4dBbcwzPu /tmp/tmp.wd5UhusAfQ + return 0 + log 'create PSMDB cluster' + set +o xtrace [2025-05-19T02:39:49+0000] create PSMDB cluster + cluster=some-name + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1932/e2e-tests/conf/secrets.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1932/e2e-tests/conf/client-70.yml ++ mktemp + local LAST_OUT=/tmp/tmp.SN7gtUvJCb ++ mktemp + local LAST_ERR=/tmp/tmp.yzWNKhNMbZ + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1932/e2e-tests/conf/secrets.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1932/e2e-tests/conf/client-70.yml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.SN7gtUvJCb secret/some-users created deployment.apps/psmdb-client created + cat /tmp/tmp.yzWNKhNMbZ + rm /tmp/tmp.SN7gtUvJCb /tmp/tmp.yzWNKhNMbZ + return 0 + apply_cluster /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1932/e2e-tests/balancer/conf/some-name-rs0.yml + '[' -z '' ']' + cat_config /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1932/e2e-tests/balancer/conf/some-name-rs0.yml + kubectl_bin apply -f - + yq eval '.spec.upgradeOptions.apply="Never"' + yq eval '(.spec | select(has("initImage"))).initImage = "perconalab/percona-server-mongodb-operator:PR-1932-138a34ea"' + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1932/e2e-tests/balancer/conf/some-name-rs0.yml + yq eval '(.spec | select(has("backup"))).backup.image = "perconalab/percona-server-mongodb-operator:main-backup"' ++ mktemp + yq eval '(.spec | select(.image == null)).image = "perconalab/percona-server-mongodb-operator:main-mongod8.0"' + yq eval '(.spec | select(has("pmm"))).pmm.image = "perconalab/pmm-client:dev-latest"' + local LAST_OUT=/tmp/tmp.YHsT8LlaQg ++ mktemp + local LAST_ERR=/tmp/tmp.zA8DiyO5bi + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.YHsT8LlaQg perconaservermongodb.psmdb.percona.com/some-name created + cat /tmp/tmp.zA8DiyO5bi + rm /tmp/tmp.YHsT8LlaQg /tmp/tmp.zA8DiyO5bi + return 0 + log 'check if cfg pods started' + set +o xtrace [2025-05-19T02:39:54+0000] check if cfg pods started + wait_for_running some-name-cfg 3 false + local name=some-name-cfg + let last_pod=2 + 
local check_cluster_readyness=false + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=cfg + local cluster_name=some-name ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + [[ 0 -eq 2 ]] + wait_pod some-name-cfg-0 + local pod=some-name-cfg-0 + set +o xtrace waiting for pod/some-name-cfg-0 to be ready........OK + for i in '$(seq 0 $last_pod)' + [[ 1 -eq 2 ]] + wait_pod some-name-cfg-1 + local pod=some-name-cfg-1 + set +o xtrace waiting for pod/some-name-cfg-1 to be ready.....OK + for i in '$(seq 0 $last_pod)' + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.xtawiCuMAQ +++ mktemp ++ local LAST_ERR=/tmp/tmp.YjhqbASkpf ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.xtawiCuMAQ ++ cat /tmp/tmp.YjhqbASkpf ++ rm /tmp/tmp.xtawiCuMAQ /tmp/tmp.YjhqbASkpf ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-cfg-2 + local pod=some-name-cfg-2 + set +o xtrace waiting for pod/some-name-cfg-2 to be ready.....OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].non_voting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.9tWtIuBlPa +++ mktemp ++ local LAST_ERR=/tmp/tmp.1cF25SR78W ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].non_voting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.9tWtIuBlPa ++ cat /tmp/tmp.1cF25SR78W ++ rm /tmp/tmp.9tWtIuBlPa /tmp/tmp.1cF25SR78W ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ false == \t\r\u\e ]] + log 'check if all shards started' + set +o xtrace [2025-05-19T02:41:23+0000] check if all shards started + wait_for_running some-name-rs0 3 false + local name=some-name-rs0 + let last_pod=2 + local check_cluster_readyness=false + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=some-name ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + [[ 0 -eq 2 ]] + wait_pod some-name-rs0-0 + local pod=some-name-rs0-0 + set +o xtrace waiting for pod/some-name-rs0-0 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 1 -eq 2 ]] + wait_pod some-name-rs0-1 + local pod=some-name-rs0-1 + set +o xtrace waiting for pod/some-name-rs0-1 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.fpO5lsqxaH +++ mktemp ++ local LAST_ERR=/tmp/tmp.t8X9h5AlXY ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.fpO5lsqxaH ++ cat /tmp/tmp.t8X9h5AlXY ++ rm /tmp/tmp.fpO5lsqxaH /tmp/tmp.t8X9h5AlXY ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-rs0-2 + local pod=some-name-rs0-2 + set +o xtrace waiting for pod/some-name-rs0-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.I6FCGhkfAv +++ mktemp ++ local LAST_ERR=/tmp/tmp.RlbZPnA5Xx ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 
2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.I6FCGhkfAv ++ cat /tmp/tmp.RlbZPnA5Xx ++ rm /tmp/tmp.I6FCGhkfAv /tmp/tmp.RlbZPnA5Xx ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ false == \t\r\u\e ]] + wait_for_running some-name-rs1 3 false + local name=some-name-rs1 + let last_pod=2 + local check_cluster_readyness=false + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs1 + local cluster_name=some-name ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + [[ 0 -eq 2 ]] + wait_pod some-name-rs1-0 + local pod=some-name-rs1-0 + set +o xtrace waiting for pod/some-name-rs1-0 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 1 -eq 2 ]] + wait_pod some-name-rs1-1 + local pod=some-name-rs1-1 + set +o xtrace waiting for pod/some-name-rs1-1 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs1")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.s3WoOXekFQ +++ mktemp ++ local LAST_ERR=/tmp/tmp.d1dEWcBsU5 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs1")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.s3WoOXekFQ ++ cat /tmp/tmp.d1dEWcBsU5 ++ rm /tmp/tmp.s3WoOXekFQ /tmp/tmp.d1dEWcBsU5 ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-rs1-2 + local pod=some-name-rs1-2 + set +o xtrace waiting for pod/some-name-rs1-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs1")].non_voting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.zTm4gNbogB +++ mktemp ++ local LAST_ERR=/tmp/tmp.xZKFjJXvIk ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs1")].non_voting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.zTm4gNbogB ++ cat /tmp/tmp.xZKFjJXvIk ++ rm /tmp/tmp.zTm4gNbogB /tmp/tmp.xZKFjJXvIk ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ false == \t\r\u\e ]] + wait_for_running some-name-rs2 3 false + local name=some-name-rs2 + let last_pod=2 + local check_cluster_readyness=false + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs2 + local cluster_name=some-name ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + [[ 0 -eq 2 ]] + wait_pod some-name-rs2-0 + local pod=some-name-rs2-0 + set +o xtrace waiting for pod/some-name-rs2-0 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 1 -eq 2 ]] + wait_pod some-name-rs2-1 + local pod=some-name-rs2-1 + set +o xtrace waiting for pod/some-name-rs2-1 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs2")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.qx0xYevnLH +++ mktemp ++ local LAST_ERR=/tmp/tmp.JP4l0ItiZS ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs2")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.qx0xYevnLH ++ cat /tmp/tmp.JP4l0ItiZS ++ rm /tmp/tmp.qx0xYevnLH /tmp/tmp.JP4l0ItiZS ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-rs2-2 + local pod=some-name-rs2-2 + set +o xtrace waiting for 
pod/some-name-rs2-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs2")].non_voting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.QQLMHvE8hT +++ mktemp ++ local LAST_ERR=/tmp/tmp.chWZxzRSlT ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs2")].non_voting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.QQLMHvE8hT ++ cat /tmp/tmp.chWZxzRSlT ++ rm /tmp/tmp.QQLMHvE8hT /tmp/tmp.chWZxzRSlT ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ false == \t\r\u\e ]] + log 'check if mongos pods started' + set +o xtrace [2025-05-19T02:42:07+0000] check if mongos pods started + wait_for_running some-name-mongos 3 + local name=some-name-mongos + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=mongos + local cluster_name=some-name ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + [[ 0 -eq 2 ]] + wait_pod some-name-mongos-0 + local pod=some-name-mongos-0 + set +o xtrace waiting for pod/some-name-mongos-0 to be ready.........OK + for i in '$(seq 0 $last_pod)' + [[ 1 -eq 2 ]] + wait_pod some-name-mongos-1 + local pod=some-name-mongos-1 + set +o xtrace waiting for pod/some-name-mongos-1 to be ready.....OK + for i in '$(seq 0 $last_pod)' + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.l1GJoQUQdV +++ mktemp ++ local LAST_ERR=/tmp/tmp.JdPghQoX83 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.l1GJoQUQdV ++ cat /tmp/tmp.JdPghQoX83 ++ rm /tmp/tmp.l1GJoQUQdV /tmp/tmp.JdPghQoX83 ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-mongos-2 + local pod=some-name-mongos-2 + set +o xtrace waiting for pod/some-name-mongos-2 to be ready.....OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].non_voting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.1F6PuaZfmn +++ mktemp ++ local LAST_ERR=/tmp/tmp.bCOz7JjeeI ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].non_voting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.1F6PuaZfmn ++ cat /tmp/tmp.bCOz7JjeeI ++ rm /tmp/tmp.1F6PuaZfmn /tmp/tmp.bCOz7JjeeI ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness + check_balancer some-name true 10 + local cluster=some-name + local expected=true + local delay=10 + local balancer_running + log 'sleeping for 10 seconds...' + set +o xtrace [2025-05-19T02:43:03+0000] sleeping for 10 seconds... 
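
check_balancer locates the psmdb-client pod and runs sh.getBalancerState() against the config server replica set, then compares the printed true/false with the expected value (true during normal operation, false while a backup or restore is in flight). Stripped of the retry and tempfile wrapper, the probe used in this run is essentially:

    POD=$(kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}')
    kubectl exec "$POD" -- bash -c \
      'printf "sh.getBalancerState()\n" | mongosh --quiet \
       "mongodb+srv://clusterAdmin:clusterAdmin123456@some-name-cfg.balancer-13428.svc.cluster.local/admin?ssl=false&replicaSet=cfg"'
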
+ sleep 10 ++ run_mongosh 'sh.getBalancerState()' clusterAdmin:clusterAdmin123456@some-name-cfg.balancer-13428 ++ grep -E -v 'Warning|cfg' ++ local 'command=sh.getBalancerState()' ++ local uri=clusterAdmin:clusterAdmin123456@some-name-cfg.balancer-13428 ++ local driver=mongodb+srv ++ local suffix=.svc.cluster.local ++ local mongo_flag= ++ grep -E 'true|false' +++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.PiQTgQKymx ++++ mktemp +++ local LAST_ERR=/tmp/tmp.wnzW2ioeDM +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.PiQTgQKymx +++ cat /tmp/tmp.wnzW2ioeDM +++ rm /tmp/tmp.PiQTgQKymx /tmp/tmp.wnzW2ioeDM +++ return 0 ++ local client_container=psmdb-client-65ff95489b-4bzk9 ++ [[ clusterAdmin:clusterAdmin123456@some-name-cfg.balancer-13428 == *cfg* ]] ++ replica_set=cfg ++ kubectl_bin exec psmdb-client-65ff95489b-4bzk9 -- bash -c 'printf '\''sh.getBalancerState()\n'\'' | mongosh --quiet mongodb+srv://clusterAdmin:clusterAdmin123456@some-name-cfg.balancer-13428.svc.cluster.local/admin?ssl=false\&replicaSet=cfg ' +++ mktemp ++ local LAST_OUT=/tmp/tmp.v5Zgns0QvM +++ mktemp ++ local LAST_ERR=/tmp/tmp.tU11RjG7SX ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl exec psmdb-client-65ff95489b-4bzk9 -- bash -c 'printf '\''sh.getBalancerState()\n'\'' | mongosh --quiet mongodb+srv://clusterAdmin:clusterAdmin123456@some-name-cfg.balancer-13428.svc.cluster.local/admin?ssl=false\&replicaSet=cfg ' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.v5Zgns0QvM ++ cat /tmp/tmp.tU11RjG7SX ++ rm /tmp/tmp.v5Zgns0QvM /tmp/tmp.tU11RjG7SX ++ return 0 + balancer_running=true + echo -n 'checking if balancer status is true...' 
checking if balancer status is true...+ [[ true != \t\r\u\e ]] + echo OK OK + write_data some-name + local cluster=some-name + log 'create user' + set +o xtrace [2025-05-19T02:43:16+0000] create user + run_mongos 'db.createUser({user:"user",pwd:"pass",roles:[{db:"app",role:"readWrite"}]})' userAdmin:userAdmin123456@some-name-mongos.balancer-13428 mongodb .svc.cluster.local --quiet 27017 mongosh + local 'command=db.createUser({user:"user",pwd:"pass",roles:[{db:"app",role:"readWrite"}]})' + local uri=userAdmin:userAdmin123456@some-name-mongos.balancer-13428 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag=--quiet + local port=27017 + local mongo_bin=mongosh ++ echo .svc.cluster.local ++ awk -F: '{print $2}' + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.8BtPxxTLWU +++ mktemp ++ local LAST_ERR=/tmp/tmp.QHyexsMjV6 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.8BtPxxTLWU ++ cat /tmp/tmp.QHyexsMjV6 ++ rm /tmp/tmp.8BtPxxTLWU /tmp/tmp.QHyexsMjV6 ++ return 0 + local client_container=psmdb-client-65ff95489b-4bzk9 + kubectl_bin exec psmdb-client-65ff95489b-4bzk9 -- bash -c 'printf '\''db.createUser({user:"user",pwd:"pass",roles:[{db:"app",role:"readWrite"}]})\n'\'' | mongosh mongodb://userAdmin:userAdmin123456@some-name-mongos.balancer-13428.svc.cluster.local:27017/admin --quiet' ++ mktemp + local LAST_OUT=/tmp/tmp.yR0Mhd4Gzz ++ mktemp + local LAST_ERR=/tmp/tmp.6WK4qQW0rK + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-65ff95489b-4bzk9 -- bash -c 'printf '\''db.createUser({user:"user",pwd:"pass",roles:[{db:"app",role:"readWrite"}]})\n'\'' | mongosh mongodb://userAdmin:userAdmin123456@some-name-mongos.balancer-13428.svc.cluster.local:27017/admin --quiet' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.yR0Mhd4Gzz [direct: mongos] admin> { ok: 1, '$clusterTime': { clusterTime: Timestamp({ t: 1747622600, i: 2 }), signature: { hash: Binary.createFromBase64('PPsjSfSl0nVFqiwXt8xkTerBYD0=', 0), keyId: Long('7505981642167549976') } }, operationTime: Timestamp({ t: 1747622600, i: 2 }) } [direct: mongos] admin> + cat /tmp/tmp.6WK4qQW0rK + rm /tmp/tmp.yR0Mhd4Gzz /tmp/tmp.6WK4qQW0rK + return 0 + sleep 2 + log 'enable sharding' + set +o xtrace [2025-05-19T02:43:22+0000] enable sharding + run_mongos 'sh.enableSharding("app")' clusterAdmin:clusterAdmin123456@some-name-mongos.balancer-13428 mongodb .svc.cluster.local --quiet 27017 mongosh + local 'command=sh.enableSharding("app")' + local uri=clusterAdmin:clusterAdmin123456@some-name-mongos.balancer-13428 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag=--quiet + local port=27017 + local mongo_bin=mongosh ++ awk -F: '{print $2}' ++ echo .svc.cluster.local + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.eRZVor44lM +++ mktemp ++ local LAST_ERR=/tmp/tmp.We23UEdoab ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 
'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.eRZVor44lM ++ cat /tmp/tmp.We23UEdoab ++ rm /tmp/tmp.eRZVor44lM /tmp/tmp.We23UEdoab ++ return 0 + local client_container=psmdb-client-65ff95489b-4bzk9 + kubectl_bin exec psmdb-client-65ff95489b-4bzk9 -- bash -c 'printf '\''sh.enableSharding("app")\n'\'' | mongosh mongodb://clusterAdmin:clusterAdmin123456@some-name-mongos.balancer-13428.svc.cluster.local:27017/admin --quiet' ++ mktemp + local LAST_OUT=/tmp/tmp.dghDa8tiIs ++ mktemp + local LAST_ERR=/tmp/tmp.bBnupxRmFi + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-65ff95489b-4bzk9 -- bash -c 'printf '\''sh.enableSharding("app")\n'\'' | mongosh mongodb://clusterAdmin:clusterAdmin123456@some-name-mongos.balancer-13428.svc.cluster.local:27017/admin --quiet' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.dghDa8tiIs [direct: mongos] admin> { ok: 1, '$clusterTime': { clusterTime: Timestamp({ t: 1747622605, i: 6 }), signature: { hash: Binary.createFromBase64('Jn856j44bdf3/33BH0I4TA31kow=', 0), keyId: Long('7505981642167549976') } }, operationTime: Timestamp({ t: 1747622605, i: 3 }) } [direct: mongos] admin> + cat /tmp/tmp.bBnupxRmFi + rm /tmp/tmp.dghDa8tiIs /tmp/tmp.bBnupxRmFi + return 0 + sleep 2 + log 'shard collection' + set +o xtrace [2025-05-19T02:43:27+0000] shard collection + run_mongos 'sh.shardCollection("app.city", { _id: 1 } )' clusterAdmin:clusterAdmin123456@some-name-mongos.balancer-13428 mongodb .svc.cluster.local --quiet 27017 mongosh + local 'command=sh.shardCollection("app.city", { _id: 1 } )' + local uri=clusterAdmin:clusterAdmin123456@some-name-mongos.balancer-13428 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag=--quiet + local port=27017 + local mongo_bin=mongosh ++ echo .svc.cluster.local ++ awk -F: '{print $2}' + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.8XwnZln1LK +++ mktemp ++ local LAST_ERR=/tmp/tmp.i1mWA8le2Q ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.8XwnZln1LK ++ cat /tmp/tmp.i1mWA8le2Q ++ rm /tmp/tmp.8XwnZln1LK /tmp/tmp.i1mWA8le2Q ++ return 0 + local client_container=psmdb-client-65ff95489b-4bzk9 + kubectl_bin exec psmdb-client-65ff95489b-4bzk9 -- bash -c 'printf '\''sh.shardCollection("app.city", { _id: 1 } )\n'\'' | mongosh mongodb://clusterAdmin:clusterAdmin123456@some-name-mongos.balancer-13428.svc.cluster.local:27017/admin --quiet' ++ mktemp + local LAST_OUT=/tmp/tmp.lFkUEDYsfV ++ mktemp + local LAST_ERR=/tmp/tmp.iRTabJ6E7A + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-65ff95489b-4bzk9 -- bash -c 'printf '\''sh.shardCollection("app.city", { _id: 1 } )\n'\'' | mongosh mongodb://clusterAdmin:clusterAdmin123456@some-name-mongos.balancer-13428.svc.cluster.local:27017/admin --quiet' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.lFkUEDYsfV [direct: mongos] admin> { collectionsharded: 'app.city', ok: 1, '$clusterTime': { clusterTime: Timestamp({ t: 1747622611, i: 37 }), signature: { hash: 
Binary.createFromBase64('3RvDAKhs/P0Laz08WSh1dmWp9RM=', 0), keyId: Long('7505981642167549976') } }, operationTime: Timestamp({ t: 1747622611, i: 36 }) } [direct: mongos] admin> + cat /tmp/tmp.iRTabJ6E7A + rm /tmp/tmp.lFkUEDYsfV /tmp/tmp.iRTabJ6E7A + return 0 + log 'write data (this can take some time, be patient)' + set +o xtrace [2025-05-19T02:43:31+0000] write data (this can take some time, be patient) + run_script_mongos /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1932/e2e-tests/balancer/data.js user:pass@some-name-mongos.balancer-13428 mongodb .svc.cluster.local --quiet mongosh + local script=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1932/e2e-tests/balancer/data.js + local uri=user:pass@some-name-mongos.balancer-13428 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag=--quiet + local mongo_bin=mongosh ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.9zOwCRfosD +++ mktemp ++ local LAST_ERR=/tmp/tmp.CfPnzmUjHY ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.9zOwCRfosD ++ cat /tmp/tmp.CfPnzmUjHY ++ rm /tmp/tmp.9zOwCRfosD /tmp/tmp.CfPnzmUjHY ++ return 0 + local client_container=psmdb-client-65ff95489b-4bzk9 ++ basename /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1932/e2e-tests/balancer/data.js + name=data.js + kubectl_bin cp /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1932/e2e-tests/balancer/data.js balancer-13428/psmdb-client-65ff95489b-4bzk9:/tmp ++ mktemp + local LAST_OUT=/tmp/tmp.Qmp3vxYBGI ++ mktemp + local LAST_ERR=/tmp/tmp.BblNj6dtYh + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl cp /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1932/e2e-tests/balancer/data.js balancer-13428/psmdb-client-65ff95489b-4bzk9:/tmp + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Qmp3vxYBGI + cat /tmp/tmp.BblNj6dtYh + rm /tmp/tmp.Qmp3vxYBGI /tmp/tmp.BblNj6dtYh + return 0 + kubectl_bin exec psmdb-client-65ff95489b-4bzk9 -- bash -c 'mongosh mongodb://user:pass@some-name-mongos.balancer-13428.svc.cluster.local/admin --quiet /tmp/data.js' ++ mktemp + local LAST_OUT=/tmp/tmp.ZPca2ZBfeA ++ mktemp + local LAST_ERR=/tmp/tmp.2Hh4ASs6E4 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-65ff95489b-4bzk9 -- bash -c 'mongosh mongodb://user:pass@some-name-mongos.balancer-13428.svc.cluster.local/admin --quiet /tmp/data.js' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.ZPca2ZBfeA + cat /tmp/tmp.2Hh4ASs6E4 + rm /tmp/tmp.ZPca2ZBfeA /tmp/tmp.2Hh4ASs6E4 + return 0 + check_backup_and_restore some-name 0 true + local cluster=some-name + local backup_suffix=0 + local balancer_end_state=true + local backup_name=backup-minio-0 + log 'running backup: backup-minio-0' + set +o xtrace [2025-05-19T02:45:34+0000] running backup: backup-minio-0 + run_backup minio backup-minio-0 + local storage=minio + local backup_name=backup-minio-0 + local type=logical + desc 'run backup backup-minio-0' + set +o xtrace ----------------------------------------------------------------------------------- run backup backup-minio-0 ----------------------------------------------------------------------------------- + yq eval '.metadata.name = "backup-minio-0" | 
.spec.storageName = "minio" | .spec.type = "logical"' /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1932/e2e-tests/balancer/conf/backup-minio.yml + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.eXbsLJrZmq ++ mktemp + local LAST_ERR=/tmp/tmp.0AdLQ4Nlln + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.eXbsLJrZmq perconaservermongodbbackup.psmdb.percona.com/backup-minio-0 created + cat /tmp/tmp.0AdLQ4Nlln + rm /tmp/tmp.eXbsLJrZmq /tmp/tmp.0AdLQ4Nlln + return 0 + wait_backup backup-minio-0 requested + local backup_name=backup-minio-0 + local target_state=requested + set +o xtrace waiting for backup-minio-0 to reach requested state. + log 'checking if balancer is disabled' + set +o xtrace [2025-05-19T02:45:40+0000] checking if balancer is disabled + check_balancer some-name false + local cluster=some-name + local expected=false + local delay=0 + local balancer_running + log 'sleeping for 0 seconds...' + set +o xtrace [2025-05-19T02:45:40+0000] sleeping for 0 seconds... + sleep 0 ++ run_mongosh 'sh.getBalancerState()' clusterAdmin:clusterAdmin123456@some-name-cfg.balancer-13428 ++ local 'command=sh.getBalancerState()' ++ local uri=clusterAdmin:clusterAdmin123456@some-name-cfg.balancer-13428 ++ local driver=mongodb+srv ++ local suffix=.svc.cluster.local ++ local mongo_flag= +++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ grep -E 'true|false' ++ grep -E -v 'Warning|cfg' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.p8aMyrlw4y ++++ mktemp +++ local LAST_ERR=/tmp/tmp.subypDLcBq +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.p8aMyrlw4y +++ cat /tmp/tmp.subypDLcBq +++ rm /tmp/tmp.p8aMyrlw4y /tmp/tmp.subypDLcBq +++ return 0 ++ local client_container=psmdb-client-65ff95489b-4bzk9 ++ [[ clusterAdmin:clusterAdmin123456@some-name-cfg.balancer-13428 == *cfg* ]] ++ replica_set=cfg ++ kubectl_bin exec psmdb-client-65ff95489b-4bzk9 -- bash -c 'printf '\''sh.getBalancerState()\n'\'' | mongosh --quiet mongodb+srv://clusterAdmin:clusterAdmin123456@some-name-cfg.balancer-13428.svc.cluster.local/admin?ssl=false\&replicaSet=cfg ' +++ mktemp ++ local LAST_OUT=/tmp/tmp.LTYbqfhNJZ +++ mktemp ++ local LAST_ERR=/tmp/tmp.ZmIvzapGRm ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl exec psmdb-client-65ff95489b-4bzk9 -- bash -c 'printf '\''sh.getBalancerState()\n'\'' | mongosh --quiet mongodb+srv://clusterAdmin:clusterAdmin123456@some-name-cfg.balancer-13428.svc.cluster.local/admin?ssl=false\&replicaSet=cfg ' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.LTYbqfhNJZ ++ cat /tmp/tmp.ZmIvzapGRm ++ rm /tmp/tmp.LTYbqfhNJZ /tmp/tmp.ZmIvzapGRm ++ return 0 + balancer_running=false + echo -n 'checking if balancer status is false...' checking if balancer status is false...+ [[ false != \f\a\l\s\e ]] + echo OK OK + wait_backup backup-minio-0 ready + local backup_name=backup-minio-0 + local target_state=ready + set +o xtrace waiting for backup-minio-0 to reach ready state............ 
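
The backup object is produced by rewriting the test's backup-minio.yml template with yq and applying it; wait_backup then blocks until the backup reports the requested state and, after the balancer-disabled check, the ready state. A hedged sketch of the creation step, assuming the template path relative to the repo used in this run:

    yq eval '.metadata.name = "backup-minio-0" |
             .spec.storageName = "minio" |
             .spec.type = "logical"' e2e-tests/balancer/conf/backup-minio.yml | kubectl apply -f -
    # the test's own wait_backup helper then waits for the "requested" and "ready" states
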
+ log 'checking if balancer is true after backup' + set +o xtrace [2025-05-19T02:46:10+0000] checking if balancer is true after backup + check_balancer some-name true 10 + local cluster=some-name + local expected=true + local delay=10 + local balancer_running + log 'sleeping for 10 seconds...' + set +o xtrace [2025-05-19T02:46:10+0000] sleeping for 10 seconds... + sleep 10 ++ grep -E -v 'Warning|cfg' ++ run_mongosh 'sh.getBalancerState()' clusterAdmin:clusterAdmin123456@some-name-cfg.balancer-13428 ++ local 'command=sh.getBalancerState()' ++ local uri=clusterAdmin:clusterAdmin123456@some-name-cfg.balancer-13428 ++ local driver=mongodb+srv ++ local suffix=.svc.cluster.local ++ local mongo_flag= +++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ grep -E 'true|false' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.CuB38gyNCx ++++ mktemp +++ local LAST_ERR=/tmp/tmp.zhUqTlxrwF +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.CuB38gyNCx +++ cat /tmp/tmp.zhUqTlxrwF +++ rm /tmp/tmp.CuB38gyNCx /tmp/tmp.zhUqTlxrwF +++ return 0 ++ local client_container=psmdb-client-65ff95489b-4bzk9 ++ [[ clusterAdmin:clusterAdmin123456@some-name-cfg.balancer-13428 == *cfg* ]] ++ replica_set=cfg ++ kubectl_bin exec psmdb-client-65ff95489b-4bzk9 -- bash -c 'printf '\''sh.getBalancerState()\n'\'' | mongosh --quiet mongodb+srv://clusterAdmin:clusterAdmin123456@some-name-cfg.balancer-13428.svc.cluster.local/admin?ssl=false\&replicaSet=cfg ' +++ mktemp ++ local LAST_OUT=/tmp/tmp.tfZLH83LEy +++ mktemp ++ local LAST_ERR=/tmp/tmp.MbOSgafn1E ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl exec psmdb-client-65ff95489b-4bzk9 -- bash -c 'printf '\''sh.getBalancerState()\n'\'' | mongosh --quiet mongodb+srv://clusterAdmin:clusterAdmin123456@some-name-cfg.balancer-13428.svc.cluster.local/admin?ssl=false\&replicaSet=cfg ' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.tfZLH83LEy ++ cat /tmp/tmp.MbOSgafn1E ++ rm /tmp/tmp.tfZLH83LEy /tmp/tmp.MbOSgafn1E ++ return 0 + balancer_running=true + echo -n 'checking if balancer status is true...' 
checking if balancer status is true...+ [[ true != \t\r\u\e ]] + echo OK OK + log 'running restore: restore-backup-minio-0' + set +o xtrace [2025-05-19T02:46:23+0000] running restore: restore-backup-minio-0 + run_restore backup-minio-0 + local backup_name=backup-minio-0 + kubectl_bin apply -f - + /usr/bin/sed -e 's/backupName:/backupName: backup-minio-0/' + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1932/e2e-tests/balancer/conf/restore.yml ++ mktemp + local LAST_OUT=/tmp/tmp.x2uWZs5bBF + /usr/bin/sed -e 's/name:/name: restore-backup-minio-0/' ++ mktemp + local LAST_ERR=/tmp/tmp.Gk9o0eL8kN + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.x2uWZs5bBF perconaservermongodbrestore.psmdb.percona.com/restore-backup-minio-0 created + cat /tmp/tmp.Gk9o0eL8kN + rm /tmp/tmp.x2uWZs5bBF /tmp/tmp.Gk9o0eL8kN + return 0 + wait_restore backup-minio-0 some-name requested 0 + local backup_name=backup-minio-0 + local cluster_name=some-name + local target_state=requested + local wait_cluster_consistency=0 + local wait_time=1780 + local ok_if_ready=0 + set +o xtrace waiting psmdb-restore/restore-backup-minio-0 to reach requested state...........OK + '[' 0 -eq 1 ']' + log 'checking if balancer is disabled' + set +o xtrace [2025-05-19T02:46:47+0000] checking if balancer is disabled + check_balancer some-name false + local cluster=some-name + local expected=false + local delay=0 + local balancer_running + log 'sleeping for 0 seconds...' + set +o xtrace [2025-05-19T02:46:47+0000] sleeping for 0 seconds... + sleep 0 ++ run_mongosh 'sh.getBalancerState()' clusterAdmin:clusterAdmin123456@some-name-cfg.balancer-13428 ++ local 'command=sh.getBalancerState()' ++ local uri=clusterAdmin:clusterAdmin123456@some-name-cfg.balancer-13428 ++ local driver=mongodb+srv ++ local suffix=.svc.cluster.local ++ local mongo_flag= +++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++++ mktemp ++ grep -E -v 'Warning|cfg' ++ grep -E 'true|false' +++ local LAST_OUT=/tmp/tmp.vv4LUde8eJ ++++ mktemp +++ local LAST_ERR=/tmp/tmp.wn29VzNhXy +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.vv4LUde8eJ +++ cat /tmp/tmp.wn29VzNhXy +++ rm /tmp/tmp.vv4LUde8eJ /tmp/tmp.wn29VzNhXy +++ return 0 ++ local client_container=psmdb-client-65ff95489b-4bzk9 ++ [[ clusterAdmin:clusterAdmin123456@some-name-cfg.balancer-13428 == *cfg* ]] ++ replica_set=cfg ++ kubectl_bin exec psmdb-client-65ff95489b-4bzk9 -- bash -c 'printf '\''sh.getBalancerState()\n'\'' | mongosh --quiet mongodb+srv://clusterAdmin:clusterAdmin123456@some-name-cfg.balancer-13428.svc.cluster.local/admin?ssl=false\&replicaSet=cfg ' +++ mktemp ++ local LAST_OUT=/tmp/tmp.pZOLxP7Tug +++ mktemp ++ local LAST_ERR=/tmp/tmp.OE4pC3rZ78 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl exec psmdb-client-65ff95489b-4bzk9 -- bash -c 'printf '\''sh.getBalancerState()\n'\'' | mongosh --quiet mongodb+srv://clusterAdmin:clusterAdmin123456@some-name-cfg.balancer-13428.svc.cluster.local/admin?ssl=false\&replicaSet=cfg ' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.pZOLxP7Tug ++ cat /tmp/tmp.OE4pC3rZ78 ++ rm /tmp/tmp.pZOLxP7Tug 
/tmp/tmp.OE4pC3rZ78 ++ return 0 + balancer_running=false + echo -n 'checking if balancer status is false...' checking if balancer status is false...+ [[ false != \f\a\l\s\e ]] + echo OK OK + wait_restore backup-minio-0 some-name ready 1 + local backup_name=backup-minio-0 + local cluster_name=some-name + local target_state=ready + local wait_cluster_consistency=1 + local wait_time=1780 + local ok_if_ready=0 + set +o xtrace waiting psmdb-restore/restore-backup-minio-0 to reach ready state..........................................................OK + '[' 1 -eq 1 ']' + wait_cluster_consistency some-name + local cluster_name=some-name + local wait_time=32 + retry=0 + sleep 7 + echo -n 'waiting for cluster readyness' waiting for cluster readyness++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.d4rMjYf7s6 +++ mktemp ++ local LAST_ERR=/tmp/tmp.pcyQADm0cR ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.d4rMjYf7s6 ++ cat /tmp/tmp.pcyQADm0cR ++ rm /tmp/tmp.d4rMjYf7s6 /tmp/tmp.pcyQADm0cR ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 1 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.RY1DGXaTdu +++ mktemp ++ local LAST_ERR=/tmp/tmp.A3gG3u2P3B ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.RY1DGXaTdu ++ cat /tmp/tmp.A3gG3u2P3B ++ rm /tmp/tmp.RY1DGXaTdu /tmp/tmp.A3gG3u2P3B ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 2 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.JGPqR08aA3 +++ mktemp ++ local LAST_ERR=/tmp/tmp.QnjEcfDo1T ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.JGPqR08aA3 ++ cat /tmp/tmp.QnjEcfDo1T ++ rm /tmp/tmp.JGPqR08aA3 /tmp/tmp.QnjEcfDo1T ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 3 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.yKh8sg428x +++ mktemp ++ local LAST_ERR=/tmp/tmp.sB9UCfgVLF ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.yKh8sg428x ++ cat /tmp/tmp.sB9UCfgVLF ++ rm /tmp/tmp.yKh8sg428x /tmp/tmp.sB9UCfgVLF ++ return 0 + [[ ready == \r\e\a\d\y ]] + echo + log 'checking if balancer is true after restore' + set +o xtrace [2025-05-19T02:49:23+0000] checking if balancer is true after restore + check_balancer some-name true 10 + local cluster=some-name + local expected=true + local delay=10 + local balancer_running + log 'sleeping for 10 seconds...' + set +o xtrace [2025-05-19T02:49:23+0000] sleeping for 10 seconds... 
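The wait_cluster_consistency helper expanded above simply polls the psmdb resource's .status.state until it reports ready. A minimal re-creation of that loop, using only the kubectl call, retry limit, and sleep interval visible in the trace:

    # poll the PerconaServerMongoDB status until it becomes "ready",
    # giving up after 32 attempts with a 10-second pause between them
    retry=0
    until [ "$(kubectl get psmdb some-name -o 'jsonpath={.status.state}')" = "ready" ]; do
      retry=$((retry + 1))
      if [ "$retry" -ge 32 ]; then
        echo "cluster did not reach ready state in time" >&2
        exit 1
      fi
      echo -n .
      sleep 10
    done
    echo "cluster is ready"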
+ sleep 10 ++ grep -E -v 'Warning|cfg' ++ run_mongosh 'sh.getBalancerState()' clusterAdmin:clusterAdmin123456@some-name-cfg.balancer-13428 ++ local 'command=sh.getBalancerState()' ++ local uri=clusterAdmin:clusterAdmin123456@some-name-cfg.balancer-13428 ++ local driver=mongodb+srv ++ local suffix=.svc.cluster.local ++ local mongo_flag= ++ grep -E 'true|false' +++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.rzKzcEiZ69 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.lXbXJunmFq +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.rzKzcEiZ69 +++ cat /tmp/tmp.lXbXJunmFq +++ rm /tmp/tmp.rzKzcEiZ69 /tmp/tmp.lXbXJunmFq +++ return 0 ++ local client_container=psmdb-client-65ff95489b-4bzk9 ++ [[ clusterAdmin:clusterAdmin123456@some-name-cfg.balancer-13428 == *cfg* ]] ++ replica_set=cfg ++ kubectl_bin exec psmdb-client-65ff95489b-4bzk9 -- bash -c 'printf '\''sh.getBalancerState()\n'\'' | mongosh --quiet mongodb+srv://clusterAdmin:clusterAdmin123456@some-name-cfg.balancer-13428.svc.cluster.local/admin?ssl=false\&replicaSet=cfg ' +++ mktemp ++ local LAST_OUT=/tmp/tmp.c0ih469veI +++ mktemp ++ local LAST_ERR=/tmp/tmp.SoRQovxsFY ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl exec psmdb-client-65ff95489b-4bzk9 -- bash -c 'printf '\''sh.getBalancerState()\n'\'' | mongosh --quiet mongodb+srv://clusterAdmin:clusterAdmin123456@some-name-cfg.balancer-13428.svc.cluster.local/admin?ssl=false\&replicaSet=cfg ' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.c0ih469veI ++ cat /tmp/tmp.SoRQovxsFY ++ rm /tmp/tmp.c0ih469veI /tmp/tmp.SoRQovxsFY ++ return 0 + balancer_running=true + echo -n 'checking if balancer status is true...' checking if balancer status is true...+ [[ true != \t\r\u\e ]] + echo OK OK + log 'disabling balancer' + set +o xtrace [2025-05-19T02:49:37+0000] disabling balancer + kubectl patch psmdb some-name --type=merge -p '{"spec":{"sharding":{"balancer":{"enabled":false}}}}' perconaservermongodb.psmdb.percona.com/some-name patched + check_balancer some-name false 10 + local cluster=some-name + local expected=false + local delay=10 + local balancer_running + log 'sleeping for 10 seconds...' + set +o xtrace [2025-05-19T02:49:39+0000] sleeping for 10 seconds... 
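The balancer is not toggled through mongosh here; it is driven declaratively through the PerconaServerMongoDB spec. The disable patch below is taken verbatim from the trace, and the re-enable patch is the assumed inverse of the same flag:

    # disable the balancer (verbatim from the trace above)
    kubectl patch psmdb some-name --type=merge \
      -p '{"spec":{"sharding":{"balancer":{"enabled":false}}}}'

    # re-enable it by flipping the same flag (assumed inverse, not shown at this point in the log)
    kubectl patch psmdb some-name --type=merge \
      -p '{"spec":{"sharding":{"balancer":{"enabled":true}}}}'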
+ sleep 10 ++ run_mongosh 'sh.getBalancerState()' clusterAdmin:clusterAdmin123456@some-name-cfg.balancer-13428 ++ local 'command=sh.getBalancerState()' ++ local uri=clusterAdmin:clusterAdmin123456@some-name-cfg.balancer-13428 ++ local driver=mongodb+srv ++ local suffix=.svc.cluster.local ++ local mongo_flag= ++ grep -E -v 'Warning|cfg' ++ grep -E 'true|false' +++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.htxquucqui ++++ mktemp +++ local LAST_ERR=/tmp/tmp.FqYZ9CfXgx +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.htxquucqui +++ cat /tmp/tmp.FqYZ9CfXgx +++ rm /tmp/tmp.htxquucqui /tmp/tmp.FqYZ9CfXgx +++ return 0 ++ local client_container=psmdb-client-65ff95489b-4bzk9 ++ [[ clusterAdmin:clusterAdmin123456@some-name-cfg.balancer-13428 == *cfg* ]] ++ replica_set=cfg ++ kubectl_bin exec psmdb-client-65ff95489b-4bzk9 -- bash -c 'printf '\''sh.getBalancerState()\n'\'' | mongosh --quiet mongodb+srv://clusterAdmin:clusterAdmin123456@some-name-cfg.balancer-13428.svc.cluster.local/admin?ssl=false\&replicaSet=cfg ' +++ mktemp ++ local LAST_OUT=/tmp/tmp.abb0kG1G4Y +++ mktemp ++ local LAST_ERR=/tmp/tmp.Gari9tBM2p ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl exec psmdb-client-65ff95489b-4bzk9 -- bash -c 'printf '\''sh.getBalancerState()\n'\'' | mongosh --quiet mongodb+srv://clusterAdmin:clusterAdmin123456@some-name-cfg.balancer-13428.svc.cluster.local/admin?ssl=false\&replicaSet=cfg ' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.abb0kG1G4Y ++ cat /tmp/tmp.Gari9tBM2p ++ rm /tmp/tmp.abb0kG1G4Y /tmp/tmp.Gari9tBM2p ++ return 0 + balancer_running=false + echo -n 'checking if balancer status is false...' checking if balancer status is false...+ [[ false != \f\a\l\s\e ]] + echo OK OK + check_backup_and_restore some-name 1 false + local cluster=some-name + local backup_suffix=1 + local balancer_end_state=false + local backup_name=backup-minio-1 + log 'running backup: backup-minio-1' + set +o xtrace [2025-05-19T02:49:53+0000] running backup: backup-minio-1 + run_backup minio backup-minio-1 + local storage=minio + local backup_name=backup-minio-1 + local type=logical + desc 'run backup backup-minio-1' + set +o xtrace ----------------------------------------------------------------------------------- run backup backup-minio-1 ----------------------------------------------------------------------------------- + yq eval '.metadata.name = "backup-minio-1" | .spec.storageName = "minio" | .spec.type = "logical"' /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1932/e2e-tests/balancer/conf/backup-minio.yml + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.15msw8ixc3 ++ mktemp + local LAST_ERR=/tmp/tmp.qqP78FVqMK + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.15msw8ixc3 perconaservermongodbbackup.psmdb.percona.com/backup-minio-1 created + cat /tmp/tmp.qqP78FVqMK + rm /tmp/tmp.15msw8ixc3 /tmp/tmp.qqP78FVqMK + return 0 + wait_backup backup-minio-1 requested + local backup_name=backup-minio-1 + local target_state=requested + set +o xtrace waiting for backup-minio-1 to reach requested state. 
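check_balancer, expanded repeatedly in this trace, asks the config replica set for sh.getBalancerState() through mongosh running in the psmdb-client pod and compares the answer to the expected value. A condensed sketch of that check, with the pod selector, connection URI, and grep filters taken from the trace (echo stands in for the printf pipe used by the script):

    expected=false   # or true, depending on the phase being verified
    client=$(kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}')
    uri='mongodb+srv://clusterAdmin:clusterAdmin123456@some-name-cfg.balancer-13428.svc.cluster.local/admin?ssl=false&replicaSet=cfg'
    state=$(kubectl exec "$client" -- bash -c "echo 'sh.getBalancerState()' | mongosh --quiet '$uri'" \
      | grep -E 'true|false' | grep -E -v 'Warning|cfg')
    if [ "$state" != "$expected" ]; then
      echo "balancer state is '$state', expected '$expected'" >&2
      exit 1
    fi
    echo OK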
+ log 'checking if balancer is disabled' + set +o xtrace [2025-05-19T02:49:58+0000] checking if balancer is disabled + check_balancer some-name false + local cluster=some-name + local expected=false + local delay=0 + local balancer_running + log 'sleeping for 0 seconds...' + set +o xtrace [2025-05-19T02:49:58+0000] sleeping for 0 seconds... + sleep 0 ++ run_mongosh 'sh.getBalancerState()' clusterAdmin:clusterAdmin123456@some-name-cfg.balancer-13428 ++ local 'command=sh.getBalancerState()' ++ local uri=clusterAdmin:clusterAdmin123456@some-name-cfg.balancer-13428 ++ local driver=mongodb+srv ++ local suffix=.svc.cluster.local ++ local mongo_flag= +++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.wF7jKi5fEj ++++ mktemp +++ local LAST_ERR=/tmp/tmp.q7SfriJYD7 +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ grep -E -v 'Warning|cfg' ++ grep -E 'true|false' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.wF7jKi5fEj +++ cat /tmp/tmp.q7SfriJYD7 +++ rm /tmp/tmp.wF7jKi5fEj /tmp/tmp.q7SfriJYD7 +++ return 0 ++ local client_container=psmdb-client-65ff95489b-4bzk9 ++ [[ clusterAdmin:clusterAdmin123456@some-name-cfg.balancer-13428 == *cfg* ]] ++ replica_set=cfg ++ kubectl_bin exec psmdb-client-65ff95489b-4bzk9 -- bash -c 'printf '\''sh.getBalancerState()\n'\'' | mongosh --quiet mongodb+srv://clusterAdmin:clusterAdmin123456@some-name-cfg.balancer-13428.svc.cluster.local/admin?ssl=false\&replicaSet=cfg ' +++ mktemp ++ local LAST_OUT=/tmp/tmp.SbXHM5BfgR +++ mktemp ++ local LAST_ERR=/tmp/tmp.RcS5wG5Qnv ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl exec psmdb-client-65ff95489b-4bzk9 -- bash -c 'printf '\''sh.getBalancerState()\n'\'' | mongosh --quiet mongodb+srv://clusterAdmin:clusterAdmin123456@some-name-cfg.balancer-13428.svc.cluster.local/admin?ssl=false\&replicaSet=cfg ' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.SbXHM5BfgR ++ cat /tmp/tmp.RcS5wG5Qnv ++ rm /tmp/tmp.SbXHM5BfgR /tmp/tmp.RcS5wG5Qnv ++ return 0 + balancer_running=false + echo -n 'checking if balancer status is false...' checking if balancer status is false...+ [[ false != \f\a\l\s\e ]] + echo OK OK + wait_backup backup-minio-1 ready + local backup_name=backup-minio-1 + local target_state=ready + set +o xtrace waiting for backup-minio-1 to reach ready state............ + log 'checking if balancer is false after backup' + set +o xtrace [2025-05-19T02:50:28+0000] checking if balancer is false after backup + check_balancer some-name false 10 + local cluster=some-name + local expected=false + local delay=10 + local balancer_running + log 'sleeping for 10 seconds...' + set +o xtrace [2025-05-19T02:50:28+0000] sleeping for 10 seconds... 
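run_restore (shown earlier for backup-minio-0 and repeated below for backup-minio-1) builds the restore object by sed-substituting the name: and backupName: placeholders in e2e-tests/balancer/conf/restore.yml. Assuming the usual PerconaServerMongoDBRestore layout (the apiVersion and a spec.clusterName field are not visible in the trace), the applied object is roughly:

    kubectl apply -f - <<'EOF'
    apiVersion: psmdb.percona.com/v1       # assumed; only the group psmdb.percona.com appears in the trace
    kind: PerconaServerMongoDBRestore
    metadata:
      name: restore-backup-minio-1         # filled in by: sed 's/name:/name: restore-backup-minio-1/'
    spec:
      clusterName: some-name               # assumed field, not part of the sed templating shown
      backupName: backup-minio-1           # filled in by: sed 's/backupName:/backupName: backup-minio-1/'
    EOF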
+ sleep 10 ++ run_mongosh 'sh.getBalancerState()' clusterAdmin:clusterAdmin123456@some-name-cfg.balancer-13428 ++ local 'command=sh.getBalancerState()' ++ local uri=clusterAdmin:clusterAdmin123456@some-name-cfg.balancer-13428 ++ local driver=mongodb+srv ++ local suffix=.svc.cluster.local ++ local mongo_flag= +++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++++ mktemp ++ grep -E -v 'Warning|cfg' +++ local LAST_OUT=/tmp/tmp.nUjb3xEgzs ++ grep -E 'true|false' ++++ mktemp +++ local LAST_ERR=/tmp/tmp.FkDfwos8rc +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.nUjb3xEgzs +++ cat /tmp/tmp.FkDfwos8rc +++ rm /tmp/tmp.nUjb3xEgzs /tmp/tmp.FkDfwos8rc +++ return 0 ++ local client_container=psmdb-client-65ff95489b-4bzk9 ++ [[ clusterAdmin:clusterAdmin123456@some-name-cfg.balancer-13428 == *cfg* ]] ++ replica_set=cfg ++ kubectl_bin exec psmdb-client-65ff95489b-4bzk9 -- bash -c 'printf '\''sh.getBalancerState()\n'\'' | mongosh --quiet mongodb+srv://clusterAdmin:clusterAdmin123456@some-name-cfg.balancer-13428.svc.cluster.local/admin?ssl=false\&replicaSet=cfg ' +++ mktemp ++ local LAST_OUT=/tmp/tmp.PBIiAtV4B1 +++ mktemp ++ local LAST_ERR=/tmp/tmp.J486XcJsmh ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl exec psmdb-client-65ff95489b-4bzk9 -- bash -c 'printf '\''sh.getBalancerState()\n'\'' | mongosh --quiet mongodb+srv://clusterAdmin:clusterAdmin123456@some-name-cfg.balancer-13428.svc.cluster.local/admin?ssl=false\&replicaSet=cfg ' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.PBIiAtV4B1 ++ cat /tmp/tmp.J486XcJsmh ++ rm /tmp/tmp.PBIiAtV4B1 /tmp/tmp.J486XcJsmh ++ return 0 + balancer_running=false + echo -n 'checking if balancer status is false...' 
checking if balancer status is false...+ [[ false != \f\a\l\s\e ]] + echo OK OK + log 'running restore: restore-backup-minio-1' + set +o xtrace [2025-05-19T02:50:42+0000] running restore: restore-backup-minio-1 + run_restore backup-minio-1 + local backup_name=backup-minio-1 + kubectl_bin apply -f - + /usr/bin/sed -e 's/name:/name: restore-backup-minio-1/' + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1932/e2e-tests/balancer/conf/restore.yml + /usr/bin/sed -e 's/backupName:/backupName: backup-minio-1/' ++ mktemp + local LAST_OUT=/tmp/tmp.1u7LFbos9y ++ mktemp + local LAST_ERR=/tmp/tmp.Z6F3LwESpX + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.1u7LFbos9y perconaservermongodbrestore.psmdb.percona.com/restore-backup-minio-1 created + cat /tmp/tmp.Z6F3LwESpX + rm /tmp/tmp.1u7LFbos9y /tmp/tmp.Z6F3LwESpX + return 0 + wait_restore backup-minio-1 some-name requested 0 + local backup_name=backup-minio-1 + local cluster_name=some-name + local target_state=requested + local wait_cluster_consistency=0 + local wait_time=1780 + local ok_if_ready=0 + set +o xtrace waiting psmdb-restore/restore-backup-minio-1 to reach requested state....................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................2025-05-19T02:50:11.705Z DEBUG Checking for active jobs {"controller": "psmdbbackup-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBBackup", "PerconaServerMongoDBBackup": {"name":"backup-minio-1","namespace":"balancer-13428"}, "namespace": "balancer-13428", "name": "backup-minio-1", "reconcileID": "4a875c63-46c2-45e5-9da5-968424ea5492", "currentJob": {"Name":"backup-minio-1","Type":0}} 2025-05-19T02:50:11.779Z INFO Acquiring the backup lock 
{"controller": "psmdbbackup-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBBackup", "PerconaServerMongoDBBackup": {"name":"backup-minio-1","namespace":"balancer-13428"}, "namespace": "balancer-13428", "name": "backup-minio-1", "reconcileID": "4a875c63-46c2-45e5-9da5-968424ea5492"} 2025-05-19T02:50:11.780Z DEBUG backupStatus Got backup meta {"controller": "psmdbbackup-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBBackup", "PerconaServerMongoDBBackup": {"name":"backup-minio-1","namespace":"balancer-13428"}, "namespace": "balancer-13428", "name": "backup-minio-1", "reconcileID": "4a875c63-46c2-45e5-9da5-968424ea5492", "backup": "backup-minio-1", "pbmName": "2025-05-19T02:49:56Z", "meta": {"type":"logical","opid":"682a9c54563f237f99c7ed1f","name":"2025-05-19T02:49:56Z","shardRemap":{"cfg":"config"},"replsets":[{"name":"rs2","backup_name":"2025-05-19T02:49:56Z/rs2/metadata.json","oplog_name":"2025-05-19T02:49:56Z/rs2/oplog","start_ts":1747622997,"status":"dumpDone","last_transition_ts":1747623001,"first_write_ts":{"T":1747622994,"I":1},"last_write_ts":{"T":0,"I":0},"node":"some-name-rs2-1.some-name-rs2.balancer-13428.svc.cluster.local:27017","conditions":[{"timestamp":1747622997,"status":"running"},{"timestamp":1747623001,"status":"dumpDone"}],"pbm_version":"2.9.1","mongo_version":"8.0.8-3"},{"name":"rs0","backup_name":"2025-05-19T02:49:56Z/rs0/metadata.json","oplog_name":"2025-05-19T02:49:56Z/rs0/oplog","start_ts":1747622997,"status":"dumpDone","last_transition_ts":1747623001,"first_write_ts":{"T":1747622994,"I":2},"last_write_ts":{"T":0,"I":0},"node":"some-name-rs0-2.some-name-rs0.balancer-13428.svc.cluster.local:27017","conditions":[{"timestamp":1747622997,"status":"running"},{"timestamp":1747623001,"status":"dumpDone"}],"pbm_version":"2.9.1","mongo_version":"8.0.8-3"},{"name":"rs1","backup_name":"2025-05-19T02:49:56Z/rs1/metadata.json","oplog_name":"2025-05-19T02:49:56Z/rs1/oplog","start_ts":1747622997,"status":"running","last_transition_ts":1747622997,"first_write_ts":{"T":1747622991,"I":3},"last_write_ts":{"T":0,"I":0},"node":"some-name-rs1-2.some-name-rs1.balancer-13428.svc.cluster.local:27017","conditions":[{"timestamp":1747622997,"status":"running"}],"pbm_version":"2.9.1","mongo_version":"8.0.8-3"},{"name":"cfg","backup_name":"2025-05-19T02:49:56Z/cfg/metadata.json","oplog_name":"2025-05-19T02:49:56Z/cfg/oplog","start_ts":1747622997,"status":"dumpDone","iscs":true,"last_transition_ts":1747623000,"first_write_ts":{"T":1747622997,"I":43},"last_write_ts":{"T":0,"I":0},"node":"some-name-cfg-1.some-name-cfg.balancer-13428.svc.cluster.local:27017","conditions":[{"timestamp":1747622997,"status":"running"},{"timestamp":1747623000,"status":"dumpDone"}],"pbm_version":"2.9.1","mongo_version":"8.0.8-3"}],"compression":"gzip","store":{"type":"s3","s3":{"region":"us-east-1","endpointUrl":"http://minio-service:9000/","forcePathStyle":true,"bucket":"operator-testing","maxUploadParts":10000,"storageClass":"STANDARD","insecureSkipTLSVerify":false}},"size":0,"mongodb_version":"8.0.8-3","fcv":"8.0","start_ts":1747622997,"last_transition_ts":1747622999,"first_write_ts":{"T":1747622997,"I":43},"last_write_ts":{"T":1,"I":1},"hb":{"T":1747623007,"I":7},"status":"running","conditions":[{"timestamp":1747622997,"status":"starting"},{"timestamp":1747622999,"status":"running"}],"n":[{"rs":"cfg","n":["some-name-cfg-1.some-name-cfg.balancer-13428.svc.cluster.local:27017","some-name-cfg-2.some-name-cfg.balancer-13428.svc.cluster
.local:27017"],"ack":"some-name-cfg-1.some-name-cfg.balancer-13428.svc.cluster.local:27017"},{"rs":"rs2","n":["some-name-rs2-1.some-name-rs2.balancer-13428.svc.cluster.local:27017","some-name-rs2-2.some-name-rs2.balancer-13428.svc.cluster.local:27017"],"ack":"some-name-rs2-1.some-name-rs2.balancer-13428.svc.cluster.local:27017"},{"rs":"rs1","n":["some-name-rs1-1.some-name-rs1.balancer-13428.svc.cluster.local:27017","some-name-rs1-2.some-name-rs1.balancer-13428.svc.cluster.local:27017"],"ack":"some-name-rs1-2.some-name-rs1.balancer-13428.svc.cluster.local:27017"},{"rs":"rs0","n":["some-name-rs0-1.some-name-rs0.balancer-13428.svc.cluster.local:27017","some-name-rs0-2.some-name-rs0.balancer-13428.svc.cluster.local:27017"],"ack":"some-name-rs0-2.some-name-rs0.balancer-13428.svc.cluster.local:27017"}],"pbm_version":"2.9.1","balancer":"off"}} 2025-05-19T02:50:11.782Z DEBUG Reconcile finished {"controller": "psmdbbackup-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBBackup", "PerconaServerMongoDBBackup": {"name":"backup-minio-1","namespace":"balancer-13428"}, "namespace": "balancer-13428", "name": "backup-minio-1", "reconcileID": "4a875c63-46c2-45e5-9da5-968424ea5492"} 2025-05-19T02:50:16.783Z DEBUG Reconciling {"controller": "psmdbbackup-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBBackup", "PerconaServerMongoDBBackup": {"name":"backup-minio-1","namespace":"balancer-13428"}, "namespace": "balancer-13428", "name": "backup-minio-1", "reconcileID": "0fa8d2d3-f52c-4ed5-9ea7-73202db03504"} 2025-05-19T02:50:16.990Z DEBUG checking if backup is allowed {"controller": "psmdbbackup-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBBackup", "PerconaServerMongoDBBackup": {"name":"backup-minio-1","namespace":"balancer-13428"}, "namespace": "balancer-13428", "name": "backup-minio-1", "reconcileID": "0fa8d2d3-f52c-4ed5-9ea7-73202db03504", "cluster": "some-name", "namespace": "balancer-13428"} 2025-05-19T02:50:16.990Z DEBUG Checking for active jobs {"controller": "psmdbbackup-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBBackup", "PerconaServerMongoDBBackup": {"name":"backup-minio-1","namespace":"balancer-13428"}, "namespace": "balancer-13428", "name": "backup-minio-1", "reconcileID": "0fa8d2d3-f52c-4ed5-9ea7-73202db03504", "currentJob": {"Name":"backup-minio-1","Type":0}} 2025-05-19T02:50:17.117Z INFO Acquiring the backup lock {"controller": "psmdbbackup-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBBackup", "PerconaServerMongoDBBackup": {"name":"backup-minio-1","namespace":"balancer-13428"}, "namespace": "balancer-13428", "name": "backup-minio-1", "reconcileID": "0fa8d2d3-f52c-4ed5-9ea7-73202db03504"} 2025-05-19T02:50:17.121Z DEBUG backupStatus Got backup meta {"controller": "psmdbbackup-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBBackup", "PerconaServerMongoDBBackup": {"name":"backup-minio-1","namespace":"balancer-13428"}, "namespace": "balancer-13428", "name": "backup-minio-1", "reconcileID": "0fa8d2d3-f52c-4ed5-9ea7-73202db03504", "backup": "backup-minio-1", "pbmName": "2025-05-19T02:49:56Z", "meta": 
{"type":"logical","opid":"682a9c54563f237f99c7ed1f","name":"2025-05-19T02:49:56Z","shardRemap":{"cfg":"config"},"replsets":[{"name":"rs2","backup_name":"2025-05-19T02:49:56Z/rs2/metadata.json","oplog_name":"2025-05-19T02:49:56Z/rs2/oplog","start_ts":1747622997,"status":"done","last_transition_ts":1747623014,"first_write_ts":{"T":1747622994,"I":1},"last_write_ts":{"T":1747623000,"I":14},"node":"some-name-rs2-1.some-name-rs2.balancer-13428.svc.cluster.local:27017","conditions":[{"timestamp":1747622997,"status":"running"},{"timestamp":1747623001,"status":"dumpDone"},{"timestamp":1747623014,"status":"done"}],"pbm_version":"2.9.1","mongo_version":"8.0.8-3"},{"name":"rs0","backup_name":"2025-05-19T02:49:56Z/rs0/metadata.json","oplog_name":"2025-05-19T02:49:56Z/rs0/oplog","start_ts":1747622997,"status":"done","last_transition_ts":1747623015,"first_write_ts":{"T":1747622994,"I":2},"last_write_ts":{"T":1747623000,"I":18},"node":"some-name-rs0-2.some-name-rs0.balancer-13428.svc.cluster.local:27017","conditions":[{"timestamp":1747622997,"status":"running"},{"timestamp":1747623001,"status":"dumpDone"},{"timestamp":1747623015,"status":"done"}],"pbm_version":"2.9.1","mongo_version":"8.0.8-3"},{"name":"rs1","backup_name":"2025-05-19T02:49:56Z/rs1/metadata.json","oplog_name":"2025-05-19T02:49:56Z/rs1/oplog","start_ts":1747622997,"status":"dumpDone","last_transition_ts":1747623012,"first_write_ts":{"T":1747622991,"I":3},"last_write_ts":{"T":0,"I":0},"node":"some-name-rs1-2.some-name-rs1.balancer-13428.svc.cluster.local:27017","conditions":[{"timestamp":1747622997,"status":"running"},{"timestamp":1747623012,"status":"dumpDone"}],"pbm_version":"2.9.1","mongo_version":"8.0.8-3"},{"name":"cfg","backup_name":"2025-05-19T02:49:56Z/cfg/metadata.json","oplog_name":"2025-05-19T02:49:56Z/cfg/oplog","start_ts":1747622997,"status":"dumpDone","iscs":true,"last_transition_ts":1747623000,"first_write_ts":{"T":1747622997,"I":43},"last_write_ts":{"T":1747623013,"I":1},"node":"some-name-cfg-1.some-name-cfg.balancer-13428.svc.cluster.local:27017","conditions":[{"timestamp":1747622997,"status":"running"},{"timestamp":1747623000,"status":"dumpDone"}],"pbm_version":"2.9.1","mongo_version":"8.0.8-3"}],"compression":"gzip","store":{"type":"s3","s3":{"region":"us-east-1","endpointUrl":"http://minio-service:9000/","forcePathStyle":true,"bucket":"operator-testing","maxUploadParts":10000,"storageClass":"STANDARD","insecureSkipTLSVerify":false}},"size":23428,"mongodb_version":"8.0.8-3","fcv":"8.0","start_ts":1747622997,"last_transition_ts":1747623013,"first_write_ts":{"T":1747622997,"I":43},"last_write_ts":{"T":1,"I":1},"hb":{"T":1747623012,"I":6},"status":"dumpDone","conditions":[{"timestamp":1747622997,"status":"starting"},{"timestamp":1747622999,"status":"running"},{"timestamp":1747623013,"status":"dumpDone"}],"n":[{"rs":"cfg","n":["some-name-cfg-1.some-name-cfg.balancer-13428.svc.cluster.local:27017","some-name-cfg-2.some-name-cfg.balancer-13428.svc.cluster.local:27017"],"ack":"some-name-cfg-1.some-name-cfg.balancer-13428.svc.cluster.local:27017"},{"rs":"rs2","n":["some-name-rs2-1.some-name-rs2.balancer-13428.svc.cluster.local:27017","some-name-rs2-2.some-name-rs2.balancer-13428.svc.cluster.local:27017"],"ack":"some-name-rs2-1.some-name-rs2.balancer-13428.svc.cluster.local:27017"},{"rs":"rs1","n":["some-name-rs1-1.some-name-rs1.balancer-13428.svc.cluster.local:27017","some-name-rs1-2.some-name-rs1.balancer-13428.svc.cluster.local:27017"],"ack":"some-name-rs1-2.some-name-rs1.balancer-13428.svc.cluster.local:27017"},{"rs":"rs0","n":[
"some-name-rs0-1.some-name-rs0.balancer-13428.svc.cluster.local:27017","some-name-rs0-2.some-name-rs0.balancer-13428.svc.cluster.local:27017"],"ack":"some-name-rs0-2.some-name-rs0.balancer-13428.svc.cluster.local:27017"}],"pbm_version":"2.9.1","balancer":"off"}} 2025-05-19T02:50:17.124Z DEBUG Reconcile finished {"controller": "psmdbbackup-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBBackup", "PerconaServerMongoDBBackup": {"name":"backup-minio-1","namespace":"balancer-13428"}, "namespace": "balancer-13428", "name": "backup-minio-1", "reconcileID": "0fa8d2d3-f52c-4ed5-9ea7-73202db03504"} 2025-05-19T02:50:22.125Z DEBUG Reconciling {"controller": "psmdbbackup-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBBackup", "PerconaServerMongoDBBackup": {"name":"backup-minio-1","namespace":"balancer-13428"}, "namespace": "balancer-13428", "name": "backup-minio-1", "reconcileID": "aa247674-5f61-4c9d-b755-d12aada4f822"} 2025-05-19T02:50:22.196Z DEBUG checking if backup is allowed {"controller": "psmdbbackup-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBBackup", "PerconaServerMongoDBBackup": {"name":"backup-minio-1","namespace":"balancer-13428"}, "namespace": "balancer-13428", "name": "backup-minio-1", "reconcileID": "aa247674-5f61-4c9d-b755-d12aada4f822", "cluster": "some-name", "namespace": "balancer-13428"} 2025-05-19T02:50:22.196Z DEBUG Checking for active jobs {"controller": "psmdbbackup-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBBackup", "PerconaServerMongoDBBackup": {"name":"backup-minio-1","namespace":"balancer-13428"}, "namespace": "balancer-13428", "name": "backup-minio-1", "reconcileID": "aa247674-5f61-4c9d-b755-d12aada4f822", "currentJob": {"Name":"backup-minio-1","Type":0}} 2025-05-19T02:50:22.274Z INFO Acquiring the backup lock {"controller": "psmdbbackup-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBBackup", "PerconaServerMongoDBBackup": {"name":"backup-minio-1","namespace":"balancer-13428"}, "namespace": "balancer-13428", "name": "backup-minio-1", "reconcileID": "aa247674-5f61-4c9d-b755-d12aada4f822"} 2025-05-19T02:50:22.275Z DEBUG backupStatus Got backup meta {"controller": "psmdbbackup-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBBackup", "PerconaServerMongoDBBackup": {"name":"backup-minio-1","namespace":"balancer-13428"}, "namespace": "balancer-13428", "name": "backup-minio-1", "reconcileID": "aa247674-5f61-4c9d-b755-d12aada4f822", "backup": "backup-minio-1", "pbmName": "2025-05-19T02:49:56Z", "meta": 
{"type":"logical","opid":"682a9c54563f237f99c7ed1f","name":"2025-05-19T02:49:56Z","shardRemap":{"cfg":"config"},"replsets":[{"name":"rs2","backup_name":"2025-05-19T02:49:56Z/rs2/metadata.json","oplog_name":"2025-05-19T02:49:56Z/rs2/oplog","start_ts":1747622997,"status":"done","last_transition_ts":1747623014,"first_write_ts":{"T":1747622994,"I":1},"last_write_ts":{"T":1747623000,"I":14},"node":"some-name-rs2-1.some-name-rs2.balancer-13428.svc.cluster.local:27017","conditions":[{"timestamp":1747622997,"status":"running"},{"timestamp":1747623001,"status":"dumpDone"},{"timestamp":1747623014,"status":"done"}],"pbm_version":"2.9.1","mongo_version":"8.0.8-3"},{"name":"rs0","backup_name":"2025-05-19T02:49:56Z/rs0/metadata.json","oplog_name":"2025-05-19T02:49:56Z/rs0/oplog","start_ts":1747622997,"status":"done","last_transition_ts":1747623015,"first_write_ts":{"T":1747622994,"I":2},"last_write_ts":{"T":1747623000,"I":18},"node":"some-name-rs0-2.some-name-rs0.balancer-13428.svc.cluster.local:27017","conditions":[{"timestamp":1747622997,"status":"running"},{"timestamp":1747623001,"status":"dumpDone"},{"timestamp":1747623015,"status":"done"}],"pbm_version":"2.9.1","mongo_version":"8.0.8-3"},{"name":"rs1","backup_name":"2025-05-19T02:49:56Z/rs1/metadata.json","oplog_name":"2025-05-19T02:49:56Z/rs1/oplog","start_ts":1747622997,"status":"done","last_transition_ts":1747623021,"first_write_ts":{"T":1747622991,"I":3},"last_write_ts":{"T":1747623011,"I":2},"node":"some-name-rs1-2.some-name-rs1.balancer-13428.svc.cluster.local:27017","conditions":[{"timestamp":1747622997,"status":"running"},{"timestamp":1747623012,"status":"dumpDone"},{"timestamp":1747623021,"status":"done"}],"pbm_version":"2.9.1","mongo_version":"8.0.8-3"},{"name":"cfg","backup_name":"2025-05-19T02:49:56Z/cfg/metadata.json","oplog_name":"2025-05-19T02:49:56Z/cfg/oplog","start_ts":1747622997,"status":"dumpDone","iscs":true,"last_transition_ts":1747623000,"first_write_ts":{"T":1747622997,"I":43},"last_write_ts":{"T":1747623013,"I":1},"node":"some-name-cfg-1.some-name-cfg.balancer-13428.svc.cluster.local:27017","conditions":[{"timestamp":1747622997,"status":"running"},{"timestamp":1747623000,"status":"dumpDone"}],"pbm_version":"2.9.1","mongo_version":"8.0.8-3"}],"compression":"gzip","store":{"type":"s3","s3":{"region":"us-east-1","endpointUrl":"http://minio-service:9000/","forcePathStyle":true,"bucket":"operator-testing","maxUploadParts":10000,"storageClass":"STANDARD","insecureSkipTLSVerify":false}},"size":26671617,"mongodb_version":"8.0.8-3","fcv":"8.0","start_ts":1747622997,"last_transition_ts":1747623013,"first_write_ts":{"T":1747622997,"I":43},"last_write_ts":{"T":1,"I":1},"hb":{"T":1747623017,"I":252},"status":"dumpDone","conditions":[{"timestamp":1747622997,"status":"starting"},{"timestamp":1747622999,"status":"running"},{"timestamp":1747623013,"status":"dumpDone"}],"n":[{"rs":"cfg","n":["some-name-cfg-1.some-name-cfg.balancer-13428.svc.cluster.local:27017","some-name-cfg-2.some-name-cfg.balancer-13428.svc.cluster.local:27017"],"ack":"some-name-cfg-1.some-name-cfg.balancer-13428.svc.cluster.local:27017"},{"rs":"rs2","n":["some-name-rs2-1.some-name-rs2.balancer-13428.svc.cluster.local:27017","some-name-rs2-2.some-name-rs2.balancer-13428.svc.cluster.local:27017"],"ack":"some-name-rs2-1.some-name-rs2.balancer-13428.svc.cluster.local:27017"},{"rs":"rs1","n":["some-name-rs1-1.some-name-rs1.balancer-13428.svc.cluster.local:27017","some-name-rs1-2.some-name-rs1.balancer-13428.svc.cluster.local:27017"],"ack":"some-name-rs1-2.some-name-rs1.balance
r-13428.svc.cluster.local:27017"},{"rs":"rs0","n":["some-name-rs0-1.some-name-rs0.balancer-13428.svc.cluster.local:27017","some-name-rs0-2.some-name-rs0.balancer-13428.svc.cluster.local:27017"],"ack":"some-name-rs0-2.some-name-rs0.balancer-13428.svc.cluster.local:27017"}],"pbm_version":"2.9.1","balancer":"off"}} 2025-05-19T02:50:22.277Z DEBUG Reconcile finished {"controller": "psmdbbackup-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBBackup", "PerconaServerMongoDBBackup": {"name":"backup-minio-1","namespace":"balancer-13428"}, "namespace": "balancer-13428", "name": "backup-minio-1", "reconcileID": "aa247674-5f61-4c9d-b755-d12aada4f822"} 2025-05-19T02:50:27.278Z DEBUG Reconciling {"controller": "psmdbbackup-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBBackup", "PerconaServerMongoDBBackup": {"name":"backup-minio-1","namespace":"balancer-13428"}, "namespace": "balancer-13428", "name": "backup-minio-1", "reconcileID": "2caea05f-e197-42e4-903b-9426aa20fb3e"} 2025-05-19T02:50:27.348Z DEBUG checking if backup is allowed {"controller": "psmdbbackup-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBBackup", "PerconaServerMongoDBBackup": {"name":"backup-minio-1","namespace":"balancer-13428"}, "namespace": "balancer-13428", "name": "backup-minio-1", "reconcileID": "2caea05f-e197-42e4-903b-9426aa20fb3e", "cluster": "some-name", "namespace": "balancer-13428"} 2025-05-19T02:50:27.348Z DEBUG Checking for active jobs {"controller": "psmdbbackup-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBBackup", "PerconaServerMongoDBBackup": {"name":"backup-minio-1","namespace":"balancer-13428"}, "namespace": "balancer-13428", "name": "backup-minio-1", "reconcileID": "2caea05f-e197-42e4-903b-9426aa20fb3e", "currentJob": {"Name":"backup-minio-1","Type":0}} 2025-05-19T02:50:27.448Z INFO Acquiring the backup lock {"controller": "psmdbbackup-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBBackup", "PerconaServerMongoDBBackup": {"name":"backup-minio-1","namespace":"balancer-13428"}, "namespace": "balancer-13428", "name": "backup-minio-1", "reconcileID": "2caea05f-e197-42e4-903b-9426aa20fb3e"} 2025-05-19T02:50:27.449Z DEBUG backupStatus Got backup meta {"controller": "psmdbbackup-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBBackup", "PerconaServerMongoDBBackup": {"name":"backup-minio-1","namespace":"balancer-13428"}, "namespace": "balancer-13428", "name": "backup-minio-1", "reconcileID": "2caea05f-e197-42e4-903b-9426aa20fb3e", "backup": "backup-minio-1", "pbmName": "2025-05-19T02:49:56Z", "meta": 
{"type":"logical","opid":"682a9c54563f237f99c7ed1f","name":"2025-05-19T02:49:56Z","shardRemap":{"cfg":"config"},"replsets":[{"name":"rs2","backup_name":"2025-05-19T02:49:56Z/rs2/metadata.json","oplog_name":"2025-05-19T02:49:56Z/rs2/oplog","start_ts":1747622997,"status":"done","last_transition_ts":1747623014,"first_write_ts":{"T":1747622994,"I":1},"last_write_ts":{"T":1747623000,"I":14},"node":"some-name-rs2-1.some-name-rs2.balancer-13428.svc.cluster.local:27017","conditions":[{"timestamp":1747622997,"status":"running"},{"timestamp":1747623001,"status":"dumpDone"},{"timestamp":1747623014,"status":"done"}],"pbm_version":"2.9.1","mongo_version":"8.0.8-3"},{"name":"rs0","backup_name":"2025-05-19T02:49:56Z/rs0/metadata.json","oplog_name":"2025-05-19T02:49:56Z/rs0/oplog","start_ts":1747622997,"status":"done","last_transition_ts":1747623015,"first_write_ts":{"T":1747622994,"I":2},"last_write_ts":{"T":1747623000,"I":18},"node":"some-name-rs0-2.some-name-rs0.balancer-13428.svc.cluster.local:27017","conditions":[{"timestamp":1747622997,"status":"running"},{"timestamp":1747623001,"status":"dumpDone"},{"timestamp":1747623015,"status":"done"}],"pbm_version":"2.9.1","mongo_version":"8.0.8-3"},{"name":"rs1","backup_name":"2025-05-19T02:49:56Z/rs1/metadata.json","oplog_name":"2025-05-19T02:49:56Z/rs1/oplog","start_ts":1747622997,"status":"done","last_transition_ts":1747623021,"first_write_ts":{"T":1747622991,"I":3},"last_write_ts":{"T":1747623011,"I":2},"node":"some-name-rs1-2.some-name-rs1.balancer-13428.svc.cluster.local:27017","conditions":[{"timestamp":1747622997,"status":"running"},{"timestamp":1747623012,"status":"dumpDone"},{"timestamp":1747623021,"status":"done"}],"pbm_version":"2.9.1","mongo_version":"8.0.8-3"},{"name":"cfg","backup_name":"2025-05-19T02:49:56Z/cfg/metadata.json","oplog_name":"2025-05-19T02:49:56Z/cfg/oplog","start_ts":1747622997,"status":"done","iscs":true,"last_transition_ts":1747623023,"first_write_ts":{"T":1747622997,"I":43},"last_write_ts":{"T":1747623013,"I":1},"node":"some-name-cfg-1.some-name-cfg.balancer-13428.svc.cluster.local:27017","conditions":[{"timestamp":1747622997,"status":"running"},{"timestamp":1747623000,"status":"dumpDone"},{"timestamp":1747623023,"status":"done"}],"pbm_version":"2.9.1","mongo_version":"8.0.8-3"}],"compression":"gzip","store":{"type":"s3","s3":{"region":"us-east-1","endpointUrl":"http://minio-service:9000/","forcePathStyle":true,"bucket":"operator-testing","maxUploadParts":10000,"storageClass":"STANDARD","insecureSkipTLSVerify":false}},"size":26811212,"mongodb_version":"8.0.8-3","fcv":"8.0","start_ts":1747622997,"last_transition_ts":1747623024,"first_write_ts":{"T":1747622997,"I":43},"last_write_ts":{"T":1747623000,"I":14},"hb":{"T":1747623022,"I":6},"status":"done","conditions":[{"timestamp":1747622997,"status":"starting"},{"timestamp":1747622999,"status":"running"},{"timestamp":1747623013,"status":"dumpDone"},{"timestamp":1747623024,"status":"done"}],"n":[{"rs":"cfg","n":["some-name-cfg-1.some-name-cfg.balancer-13428.svc.cluster.local:27017","some-name-cfg-2.some-name-cfg.balancer-13428.svc.cluster.local:27017"],"ack":"some-name-cfg-1.some-name-cfg.balancer-13428.svc.cluster.local:27017"},{"rs":"rs2","n":["some-name-rs2-1.some-name-rs2.balancer-13428.svc.cluster.local:27017","some-name-rs2-2.some-name-rs2.balancer-13428.svc.cluster.local:27017"],"ack":"some-name-rs2-1.some-name-rs2.balancer-13428.svc.cluster.local:27017"},{"rs":"rs1","n":["some-name-rs1-1.some-name-rs1.balancer-13428.svc.cluster.local:27017","some-name-rs1-2.some-name-rs1.bal
ancer-13428.svc.cluster.local:27017"],"ack":"some-name-rs1-2.some-name-rs1.balancer-13428.svc.cluster.local:27017"},{"rs":"rs0","n":["some-name-rs0-1.some-name-rs0.balancer-13428.svc.cluster.local:27017","some-name-rs0-2.some-name-rs0.balancer-13428.svc.cluster.local:27017"],"ack":"some-name-rs0-2.some-name-rs0.balancer-13428.svc.cluster.local:27017"}],"pbm_version":"2.9.1","balancer":"off"}} 2025-05-19T02:50:27.451Z INFO Backup state changed {"controller": "psmdbbackup-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBBackup", "PerconaServerMongoDBBackup": {"name":"backup-minio-1","namespace":"balancer-13428"}, "namespace": "balancer-13428", "name": "backup-minio-1", "reconcileID": "2caea05f-e197-42e4-903b-9426aa20fb3e", "previous": "running", "current": "ready"} 2025-05-19T02:50:27.480Z INFO Releasing backup lock {"controller": "psmdbbackup-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBBackup", "PerconaServerMongoDBBackup": {"name":"backup-minio-1","namespace":"balancer-13428"}, "namespace": "balancer-13428", "name": "backup-minio-1", "reconcileID": "2caea05f-e197-42e4-903b-9426aa20fb3e", "lease": "psmdb-some-name-backup-lock"} 2025-05-19T02:50:27.521Z DEBUG Reconcile finished {"controller": "psmdbbackup-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBBackup", "PerconaServerMongoDBBackup": {"name":"backup-minio-1","namespace":"balancer-13428"}, "namespace": "balancer-13428", "name": "backup-minio-1", "reconcileID": "2caea05f-e197-42e4-903b-9426aa20fb3e"} 2025-05-19T02:50:27.521Z DEBUG Reconciling {"controller": "psmdbbackup-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBBackup", "PerconaServerMongoDBBackup": {"name":"backup-minio-1","namespace":"balancer-13428"}, "namespace": "balancer-13428", "name": "backup-minio-1", "reconcileID": "ae551704-6323-4c70-b7b3-9e64315e7589"} 2025-05-19T02:50:27.521Z DEBUG Reconcile finished {"controller": "psmdbbackup-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBBackup", "PerconaServerMongoDBBackup": {"name":"backup-minio-1","namespace":"balancer-13428"}, "namespace": "balancer-13428", "name": "backup-minio-1", "reconcileID": "ae551704-6323-4c70-b7b3-9e64315e7589"} 2025-05-19T02:50:32.521Z DEBUG Reconciling {"controller": "psmdbbackup-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBBackup", "PerconaServerMongoDBBackup": {"name":"backup-minio-1","namespace":"balancer-13428"}, "namespace": "balancer-13428", "name": "backup-minio-1", "reconcileID": "48504a3a-07ac-40d2-a292-e80886274191"} 2025-05-19T02:50:32.521Z DEBUG Reconcile finished {"controller": "psmdbbackup-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBBackup", "PerconaServerMongoDBBackup": {"name":"backup-minio-1","namespace":"balancer-13428"}, "namespace": "balancer-13428", "name": "backup-minio-1", "reconcileID": "48504a3a-07ac-40d2-a292-e80886274191"} 2025-05-19T02:50:45.773Z DEBUG checking if restore is allowed {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-1","namespace":"balancer-13428"}, "namespace": "balancer-13428", "name": "restore-backup-minio-1", "reconcileID": "11a32bea-e6b0-48bd-a871-199f05073479", "cluster": "some-name", "namespace": 
"balancer-13428"} 2025-05-19T02:50:45.890Z INFO Terminating mongos pods {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-1","namespace":"balancer-13428"}, "namespace": "balancer-13428", "name": "restore-backup-minio-1", "reconcileID": "11a32bea-e6b0-48bd-a871-199f05073479"} 2025-05-19T02:50:46.037Z DEBUG Cluster is not ready {"controller": "psmdb-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDB", "PerconaServerMongoDB": {"name":"some-name","namespace":"balancer-13428"}, "namespace": "balancer-13428", "name": "some-name", "reconcileID": "ea2734f9-a258-4b93-ac67-6a868aa290ca", "upgradeInProgress": false, "replsetsReady": 4, "clusterState": "ready"} 2025-05-19T02:50:46.038Z INFO Cluster state changed {"controller": "psmdb-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDB", "PerconaServerMongoDB": {"name":"some-name","namespace":"balancer-13428"}, "namespace": "balancer-13428", "name": "some-name", "reconcileID": "ea2734f9-a258-4b93-ac67-6a868aa290ca", "previous": "ready", "current": "initializing"} 2025-05-19T02:50:50.721Z DEBUG Cluster is not ready {"controller": "psmdb-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDB", "PerconaServerMongoDB": {"name":"some-name","namespace":"balancer-13428"}, "namespace": "balancer-13428", "name": "some-name", "reconcileID": "285af35c-2107-4f71-b9ce-4748cfcf8baf", "upgradeInProgress": false, "replsetsReady": 4, "clusterState": "ready"} 2025-05-19T02:50:50.924Z DEBUG checking if restore is allowed {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-1","namespace":"balancer-13428"}, "namespace": "balancer-13428", "name": "restore-backup-minio-1", "reconcileID": "719f4e76-9c46-4021-8748-7c193f7600a6", "cluster": "some-name", "namespace": "balancer-13428"} 2025-05-19T02:50:51.049Z INFO Waiting for mongos pods to terminate {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-1","namespace":"balancer-13428"}, "namespace": "balancer-13428", "name": "restore-backup-minio-1", "reconcileID": "719f4e76-9c46-4021-8748-7c193f7600a6"} 2025-05-19T02:50:55.611Z DEBUG Cluster is not ready {"controller": "psmdb-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDB", "PerconaServerMongoDB": {"name":"some-name","namespace":"balancer-13428"}, "namespace": "balancer-13428", "name": "some-name", "reconcileID": "822b8abd-dbfa-4276-97e5-c2f1429b1474", "upgradeInProgress": false, "replsetsReady": 4, "clusterState": "ready"} 2025-05-19T02:50:56.050Z DEBUG checking if restore is allowed {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-1","namespace":"balancer-13428"}, "namespace": "balancer-13428", "name": "restore-backup-minio-1", "reconcileID": "e4a82df3-0abe-4a84-9a69-a6b2eeaae5bc", "cluster": "some-name", "namespace": "balancer-13428"} 2025-05-19T02:50:56.161Z INFO Waiting for mongos pods to terminate {"controller": "psmdbrestore-controller", 
"controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-1","namespace":"balancer-13428"}, "namespace": "balancer-13428", "name": "restore-backup-minio-1", "reconcileID": "e4a82df3-0abe-4a84-9a69-a6b2eeaae5bc"} 2025-05-19T02:51:00.318Z DEBUG Cluster is not ready {"controller": "psmdb-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDB", "PerconaServerMongoDB": {"name":"some-name","namespace":"balancer-13428"}, "namespace": "balancer-13428", "name": "some-name", "reconcileID": "14007605-67fb-4415-85b9-11e605017310", "upgradeInProgress": false, "replsetsReady": 4, "clusterState": "ready"} 2025-05-19T02:51:01.163Z DEBUG checking if restore is allowed {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-1","namespace":"balancer-13428"}, "namespace": "balancer-13428", "name": "restore-backup-minio-1", "reconcileID": "bb09e755-c19b-4302-9621-b89d75883c31", "cluster": "some-name", "namespace": "balancer-13428"} 2025-05-19T02:51:01.341Z INFO Waiting for mongos pods to terminate {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-1","namespace":"balancer-13428"}, "namespace": "balancer-13428", "name": "restore-backup-minio-1", "reconcileID": "bb09e755-c19b-4302-9621-b89d75883c31"} 2025-05-19T02:51:05.314Z DEBUG Cluster is not ready {"controller": "psmdb-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDB", "PerconaServerMongoDB": {"name":"some-name","namespace":"balancer-13428"}, "namespace": "balancer-13428", "name": "some-name", "reconcileID": "f3572c0d-0b22-4a3c-81f3-2ef65aa461c5", "upgradeInProgress": false, "replsetsReady": 4, "clusterState": "ready"} 2025-05-19T02:51:06.341Z DEBUG checking if restore is allowed {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-1","namespace":"balancer-13428"}, "namespace": "balancer-13428", "name": "restore-backup-minio-1", "reconcileID": "4f75ff0b-b59e-44cc-ad42-e5f288c8cd04", "cluster": "some-name", "namespace": "balancer-13428"} 2025-05-19T02:51:06.537Z INFO Starting restore {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-1","namespace":"balancer-13428"}, "namespace": "balancer-13428", "name": "restore-backup-minio-1", "reconcileID": "4f75ff0b-b59e-44cc-ad42-e5f288c8cd04", "backup": "2025-05-19T02:49:56Z"} 2025-05-19T02:51:06.537Z INFO Starting logical restore {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-1","namespace":"balancer-13428"}, "namespace": "balancer-13428", "name": "restore-backup-minio-1", "reconcileID": "4f75ff0b-b59e-44cc-ad42-e5f288c8cd04", "backup": "2025-05-19T02:49:56Z"} 2025-05-19T02:51:06.537Z INFO Sending restore command {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", 
"PerconaServerMongoDBRestore": {"name":"restore-backup-minio-1","namespace":"balancer-13428"}, "namespace": "balancer-13428", "name": "restore-backup-minio-1", "reconcileID": "4f75ff0b-b59e-44cc-ad42-e5f288c8cd04", "restoreCmd": "name: 2025-05-19T02:51:06.537807358Z, snapshot: 2025-05-19T02:49:56Z"} 2025-05-19T02:51:06.541Z INFO Restore state changed {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-1","namespace":"balancer-13428"}, "namespace": "balancer-13428", "name": "restore-backup-minio-1", "reconcileID": "4f75ff0b-b59e-44cc-ad42-e5f288c8cd04", "previous": "", "current": "requested"} 2025-05-19T02:51:07.123Z DEBUG checking if restore is allowed {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-1","namespace":"balancer-13428"}, "namespace": "balancer-13428", "name": "restore-backup-minio-1", "reconcileID": "8850dd4e-4754-4763-9395-5c31a6898548", "cluster": "some-name", "namespace": "balancer-13428"} 2025-05-19T02:51:07.200Z INFO Restore state changed {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-1","namespace":"balancer-13428"}, "namespace": "balancer-13428", "name": "restore-backup-minio-1", "reconcileID": "8850dd4e-4754-4763-9395-5c31a6898548", "previous": "requested", "current": "running"} 2025-05-19T02:51:07.767Z DEBUG checking if restore is allowed {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-1","namespace":"balancer-13428"}, "namespace": "balancer-13428", "name": "restore-backup-minio-1", "reconcileID": "73899ce1-be63-4d32-aaae-8468d212b8a8", "cluster": "some-name", "namespace": "balancer-13428"} 2025-05-19T02:51:12.124Z DEBUG checking if restore is allowed {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-1","namespace":"balancer-13428"}, "namespace": "balancer-13428", "name": "restore-backup-minio-1", "reconcileID": "01e29e98-4ff1-4eff-a9a3-be513261a6e5", "cluster": "some-name", "namespace": "balancer-13428"} 2025-05-19T02:51:16.279Z DEBUG Cluster is not ready {"controller": "psmdb-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDB", "PerconaServerMongoDB": {"name":"some-name","namespace":"balancer-13428"}, "namespace": "balancer-13428", "name": "some-name", "reconcileID": "1b35916e-46b0-48b2-ac57-ab066244e7d4", "upgradeInProgress": false, "replsetsReady": 4, "clusterState": "ready"} 2025-05-19T02:51:17.445Z DEBUG checking if restore is allowed {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-1","namespace":"balancer-13428"}, "namespace": "balancer-13428", "name": "restore-backup-minio-1", "reconcileID": "f3feb77e-76cd-43ab-9319-dade273af1c8", "cluster": "some-name", "namespace": "balancer-13428"} 2025-05-19T02:51:22.556Z DEBUG checking if restore is allowed {"controller": 
"psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-1","namespace":"balancer-13428"}, "namespace": "balancer-13428", "name": "restore-backup-minio-1", "reconcileID": "c098c6fa-652c-4bee-bc3c-92d03b989f88", "cluster": "some-name", "namespace": "balancer-13428"} 2025-05-19T02:51:27.281Z DEBUG Cluster is not ready {"controller": "psmdb-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDB", "PerconaServerMongoDB": {"name":"some-name","namespace":"balancer-13428"}, "namespace": "balancer-13428", "name": "some-name", "reconcileID": "a4575b44-52bc-4c5e-a85b-a5116ccd2df6", "upgradeInProgress": false, "replsetsReady": 4, "clusterState": "ready"} 2025-05-19T02:51:27.653Z DEBUG checking if restore is allowed {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-1","namespace":"balancer-13428"}, "namespace": "balancer-13428", "name": "restore-backup-minio-1", "reconcileID": "6da0a7e7-0081-44b9-8f38-de80ca7643f8", "cluster": "some-name", "namespace": "balancer-13428"} 2025-05-19T02:51:32.780Z DEBUG checking if restore is allowed {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-1","namespace":"balancer-13428"}, "namespace": "balancer-13428", "name": "restore-backup-minio-1", "reconcileID": "100a6552-2f60-4685-8eef-9bad7627d3be", "cluster": "some-name", "namespace": "balancer-13428"} 2025-05-19T02:51:37.232Z DEBUG Cluster is not ready {"controller": "psmdb-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDB", "PerconaServerMongoDB": {"name":"some-name","namespace":"balancer-13428"}, "namespace": "balancer-13428", "name": "some-name", "reconcileID": "6d7bb636-4408-4153-88cd-dac66617eff9", "upgradeInProgress": false, "replsetsReady": 4, "clusterState": "ready"} 2025-05-19T02:51:37.889Z DEBUG checking if restore is allowed {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-1","namespace":"balancer-13428"}, "namespace": "balancer-13428", "name": "restore-backup-minio-1", "reconcileID": "4811b040-41df-4c9b-9522-7f411ea55535", "cluster": "some-name", "namespace": "balancer-13428"} 2025-05-19T02:51:43.025Z DEBUG checking if restore is allowed {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-1","namespace":"balancer-13428"}, "namespace": "balancer-13428", "name": "restore-backup-minio-1", "reconcileID": "167e7915-b110-4782-a1b7-1660ef2819a6", "cluster": "some-name", "namespace": "balancer-13428"} 2025-05-19T02:51:47.188Z DEBUG Cluster is not ready {"controller": "psmdb-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDB", "PerconaServerMongoDB": {"name":"some-name","namespace":"balancer-13428"}, "namespace": "balancer-13428", "name": "some-name", "reconcileID": "35366160-e507-4c59-b20b-fa7c114d36da", "upgradeInProgress": false, "replsetsReady": 4, "clusterState": "ready"} 2025-05-19T02:51:48.217Z DEBUG checking if 
restore is allowed {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-1","namespace":"balancer-13428"}, "namespace": "balancer-13428", "name": "restore-backup-minio-1", "reconcileID": "7f4d3b75-257b-4faa-999a-a06faf1e930f", "cluster": "some-name", "namespace": "balancer-13428"} 2025-05-19T02:51:53.302Z DEBUG checking if restore is allowed {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-1","namespace":"balancer-13428"}, "namespace": "balancer-13428", "name": "restore-backup-minio-1", "reconcileID": "ff1b2482-ad8d-4f27-8dd9-2de932fc50af", "cluster": "some-name", "namespace": "balancer-13428"} 2025-05-19T02:51:57.886Z DEBUG Cluster is not ready {"controller": "psmdb-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDB", "PerconaServerMongoDB": {"name":"some-name","namespace":"balancer-13428"}, "namespace": "balancer-13428", "name": "some-name", "reconcileID": "bef01862-555f-4c8a-8aa9-bc70d9b5495a", "upgradeInProgress": false, "replsetsReady": 4, "clusterState": "ready"} 2025-05-19T02:51:58.412Z DEBUG checking if restore is allowed {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-1","namespace":"balancer-13428"}, "namespace": "balancer-13428", "name": "restore-backup-minio-1", "reconcileID": "14f53084-29af-4d9c-8edc-75017508540a", "cluster": "some-name", "namespace": "balancer-13428"} 2025-05-19T02:52:03.527Z DEBUG checking if restore is allowed {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-1","namespace":"balancer-13428"}, "namespace": "balancer-13428", "name": "restore-backup-minio-1", "reconcileID": "a766c717-a0ab-4fb6-9dc4-b8d5523cedd1", "cluster": "some-name", "namespace": "balancer-13428"} 2025-05-19T02:52:07.989Z DEBUG Cluster is not ready {"controller": "psmdb-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDB", "PerconaServerMongoDB": {"name":"some-name","namespace":"balancer-13428"}, "namespace": "balancer-13428", "name": "some-name", "reconcileID": "70ef9e47-2d5f-4628-83b3-42524c8b1167", "upgradeInProgress": false, "replsetsReady": 4, "clusterState": "ready"} 2025-05-19T02:52:08.652Z DEBUG checking if restore is allowed {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-1","namespace":"balancer-13428"}, "namespace": "balancer-13428", "name": "restore-backup-minio-1", "reconcileID": "a763fe94-9519-41e0-a3d9-394a00a2d6cc", "cluster": "some-name", "namespace": "balancer-13428"} 2025-05-19T02:52:13.768Z DEBUG checking if restore is allowed {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-1","namespace":"balancer-13428"}, "namespace": "balancer-13428", "name": "restore-backup-minio-1", "reconcileID": "74022bc5-8320-4cd7-8198-377ec07a23d0", "cluster": "some-name", 
"namespace": "balancer-13428"} 2025-05-19T02:52:18.443Z DEBUG Cluster is not ready {"controller": "psmdb-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDB", "PerconaServerMongoDB": {"name":"some-name","namespace":"balancer-13428"}, "namespace": "balancer-13428", "name": "some-name", "reconcileID": "4ad2bd9e-4ae3-4398-8e92-c146b2d25501", "upgradeInProgress": false, "replsetsReady": 4, "clusterState": "ready"} 2025-05-19T02:52:18.878Z DEBUG checking if restore is allowed {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-1","namespace":"balancer-13428"}, "namespace": "balancer-13428", "name": "restore-backup-minio-1", "reconcileID": "8b00f699-daa4-4d1a-b795-88ae29bd70c1", "cluster": "some-name", "namespace": "balancer-13428"} 2025-05-19T02:52:23.984Z DEBUG checking if restore is allowed {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-1","namespace":"balancer-13428"}, "namespace": "balancer-13428", "name": "restore-backup-minio-1", "reconcileID": "4f0c06d8-2c53-4d8f-a682-7b16c7339313", "cluster": "some-name", "namespace": "balancer-13428"} 2025-05-19T02:52:29.152Z DEBUG checking if restore is allowed {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-1","namespace":"balancer-13428"}, "namespace": "balancer-13428", "name": "restore-backup-minio-1", "reconcileID": "5616151a-d896-4ffa-b928-c1fcbe3b2a29", "cluster": "some-name", "namespace": "balancer-13428"} 2025-05-19T02:52:29.469Z DEBUG Cluster is not ready {"controller": "psmdb-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDB", "PerconaServerMongoDB": {"name":"some-name","namespace":"balancer-13428"}, "namespace": "balancer-13428", "name": "some-name", "reconcileID": "27da9ad9-70bc-45df-9f8a-71913306b6a2", "upgradeInProgress": false, "replsetsReady": 4, "clusterState": "ready"} 2025-05-19T02:52:34.280Z DEBUG checking if restore is allowed {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-1","namespace":"balancer-13428"}, "namespace": "balancer-13428", "name": "restore-backup-minio-1", "reconcileID": "27a87625-9a02-4443-962a-d811396bbbe2", "cluster": "some-name", "namespace": "balancer-13428"} 2025-05-19T02:52:39.262Z DEBUG Cluster is not ready {"controller": "psmdb-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDB", "PerconaServerMongoDB": {"name":"some-name","namespace":"balancer-13428"}, "namespace": "balancer-13428", "name": "some-name", "reconcileID": "3a1c63ec-6481-4ab7-babc-ee076ad207c2", "upgradeInProgress": false, "replsetsReady": 4, "clusterState": "ready"} 2025-05-19T02:52:39.397Z DEBUG checking if restore is allowed {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-1","namespace":"balancer-13428"}, "namespace": "balancer-13428", "name": "restore-backup-minio-1", "reconcileID": 
"7496a0f2-9ed2-4cc0-952e-79a3c496330b", "cluster": "some-name", "namespace": "balancer-13428"} 2025-05-19T02:52:44.474Z DEBUG checking if restore is allowed {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-1","namespace":"balancer-13428"}, "namespace": "balancer-13428", "name": "restore-backup-minio-1", "reconcileID": "b6e1b70e-9a61-467f-9db1-bf9bb551ba09", "cluster": "some-name", "namespace": "balancer-13428"} 2025-05-19T02:52:49.547Z DEBUG checking if restore is allowed {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-1","namespace":"balancer-13428"}, "namespace": "balancer-13428", "name": "restore-backup-minio-1", "reconcileID": "7aff2642-1779-420b-9bae-559d9e3b7712", "cluster": "some-name", "namespace": "balancer-13428"} 2025-05-19T02:52:49.625Z DEBUG Cluster is not ready {"controller": "psmdb-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDB", "PerconaServerMongoDB": {"name":"some-name","namespace":"balancer-13428"}, "namespace": "balancer-13428", "name": "some-name", "reconcileID": "83f0ceff-7ea2-4c30-91f7-98011f261f4e", "upgradeInProgress": false, "replsetsReady": 4, "clusterState": "ready"} 2025-05-19T02:52:54.639Z DEBUG checking if restore is allowed {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-1","namespace":"balancer-13428"}, "namespace": "balancer-13428", "name": "restore-backup-minio-1", "reconcileID": "a55e8f1e-1050-43bd-a9d1-d35e5c172559", "cluster": "some-name", "namespace": "balancer-13428"} 2025-05-19T02:52:59.718Z DEBUG checking if restore is allowed {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-1","namespace":"balancer-13428"}, "namespace": "balancer-13428", "name": "restore-backup-minio-1", "reconcileID": "3e7bf54b-ca17-47ba-8feb-0e7f3025ecbb", "cluster": "some-name", "namespace": "balancer-13428"} 2025-05-19T02:52:59.792Z INFO Restore state changed {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-1","namespace":"balancer-13428"}, "namespace": "balancer-13428", "name": "restore-backup-minio-1", "reconcileID": "3e7bf54b-ca17-47ba-8feb-0e7f3025ecbb", "previous": "running", "current": "ready"} 2025-05-19T02:53:00.148Z DEBUG Cluster is not ready {"controller": "psmdb-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDB", "PerconaServerMongoDB": {"name":"some-name","namespace":"balancer-13428"}, "namespace": "balancer-13428", "name": "some-name", "reconcileID": "a5909dc6-245d-449b-827b-d4248ba8343c", "upgradeInProgress": false, "replsetsReady": 4, "clusterState": "ready"} 2025-05-19T02:53:10.411Z DEBUG Object created {"controller": "psmdb-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDB", "PerconaServerMongoDB": {"name":"some-name","namespace":"balancer-13428"}, "namespace": "balancer-13428", "name": "some-name", "reconcileID": 
"21fd4f87-eed8-4277-9b26-6ae52ba98150", "name": "some-name-mongos", "kind": "&TypeMeta{Kind:StatefulSet,APIVersion:apps/v1,}", "generation": 0, "resourceVersion": ""} 2025-05-19T02:53:10.467Z DEBUG Object created {"controller": "psmdb-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDB", "PerconaServerMongoDB": {"name":"some-name","namespace":"balancer-13428"}, "namespace": "balancer-13428", "name": "some-name", "reconcileID": "21fd4f87-eed8-4277-9b26-6ae52ba98150", "name": "some-name-mongos-", "kind": "&TypeMeta{Kind:PodDisruptionBudget,APIVersion:policy/v1,}", "generation": 0, "resourceVersion": ""} 2025-05-19T02:53:20.785Z ERROR Reconciler error {"controller": "psmdb-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDB", "PerconaServerMongoDB": {"name":"some-name","namespace":"balancer-13428"}, "namespace": "balancer-13428", "name": "some-name", "reconcileID": "21fd4f87-eed8-4277-9b26-6ae52ba98150", "error": "reconcile mongos: failed to disable balancer: failed to get mongos connection: ping mongo: server selection error: server selection timeout, current topology: { Type: Unknown, Servers: [{ Addr: some-name-mongos.balancer-13428.svc.cluster.local:27017, Type: Unknown, Last error: dial tcp 34.118.238.204:27017: connect: connection refused }, ] }", "errorVerbose": "server selection error: server selection timeout, current topology: { Type: Unknown, Servers: [{ Addr: some-name-mongos.balancer-13428.svc.cluster.local:27017, Type: Unknown, Last error: dial tcp 34.118.238.204:27017: connect: connection refused }, ] }\nping mongo\ngithub.com/percona/percona-server-mongodb-operator/pkg/psmdb/mongo.Dial\n\t/go/src/github.com/percona/percona-server-mongodb-operator/pkg/psmdb/mongo/mongo.go:123\ngithub.com/percona/percona-server-mongodb-operator/pkg/psmdb.MongosClient\n\t/go/src/github.com/percona/percona-server-mongodb-operator/pkg/psmdb/client.go:85\ngithub.com/percona/percona-server-mongodb-operator/pkg/controller/perconaservermongodb.(*mongoClientProvider).Mongos\n\t/go/src/github.com/percona/percona-server-mongodb-operator/pkg/controller/perconaservermongodb/connections.go:47\ngithub.com/percona/percona-server-mongodb-operator/pkg/controller/perconaservermongodb.(*ReconcilePerconaServerMongoDB).mongosClientWithRole\n\t/go/src/github.com/percona/percona-server-mongodb-operator/pkg/controller/perconaservermongodb/connections.go:64\ngithub.com/percona/percona-server-mongodb-operator/pkg/controller/perconaservermongodb.(*ReconcilePerconaServerMongoDB).disableBalancer\n\t/go/src/github.com/percona/percona-server-mongodb-operator/pkg/controller/perconaservermongodb/balancer.go:144\ngithub.com/percona/percona-server-mongodb-operator/pkg/controller/perconaservermongodb.(*ReconcilePerconaServerMongoDB).disableBalancerIfNeeded\n\t/go/src/github.com/percona/percona-server-mongodb-operator/pkg/controller/perconaservermongodb/balancer.go:124\ngithub.com/percona/percona-server-mongodb-operator/pkg/controller/perconaservermongodb.(*ReconcilePerconaServerMongoDB).reconcileMongos\n\t/go/src/github.com/percona/percona-server-mongodb-operator/pkg/controller/perconaservermongodb/psmdb_controller.go:1222\ngithub.com/percona/percona-server-mongodb-operator/pkg/controller/perconaservermongodb.(*ReconcilePerconaServerMongoDB).Reconcile\n\t/go/src/github.com/percona/percona-server-mongodb-operator/pkg/controller/perconaservermongodb/psmdb_controller.go:423\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Reconc
ile\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.20.4/pkg/internal/controller/controller.go:119\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).reconcileHandler\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.20.4/pkg/internal/controller/controller.go:334\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).processNextWorkItem\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.20.4/pkg/internal/controller/controller.go:294\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Start.func2.2\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.20.4/pkg/internal/controller/controller.go:255\nruntime.goexit\n\t/usr/local/go/src/runtime/asm_amd64.s:1700\nfailed to get mongos connection\ngithub.com/percona/percona-server-mongodb-operator/pkg/controller/perconaservermongodb.(*ReconcilePerconaServerMongoDB).disableBalancer\n\t/go/src/github.com/percona/percona-server-mongodb-operator/pkg/controller/perconaservermongodb/balancer.go:146\ngithub.com/percona/percona-server-mongodb-operator/pkg/controller/perconaservermongodb.(*ReconcilePerconaServerMongoDB).disableBalancerIfNeeded\n\t/go/src/github.com/percona/percona-server-mongodb-operator/pkg/controller/perconaservermongodb/balancer.go:124\ngithub.com/percona/percona-server-mongodb-operator/pkg/controller/perconaservermongodb.(*ReconcilePerconaServerMongoDB).reconcileMongos\n\t/go/src/github.com/percona/percona-server-mongodb-operator/pkg/controller/perconaservermongodb/psmdb_controller.go:1222\ngithub.com/percona/percona-server-mongodb-operator/pkg/controller/perconaservermongodb.(*ReconcilePerconaServerMongoDB).Reconcile\n\t/go/src/github.com/percona/percona-server-mongodb-operator/pkg/controller/perconaservermongodb/psmdb_controller.go:423\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Reconcile\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.20.4/pkg/internal/controller/controller.go:119\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).reconcileHandler\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.20.4/pkg/internal/controller/controller.go:334\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).processNextWorkItem\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.20.4/pkg/internal/controller/controller.go:294\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Start.func2.2\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.20.4/pkg/internal/controller/controller.go:255\nruntime.goexit\n\t/usr/local/go/src/runtime/asm_amd64.s:1700\nfailed to disable 
balancer\ngithub.com/percona/percona-server-mongodb-operator/pkg/controller/perconaservermongodb.(*ReconcilePerconaServerMongoDB).reconcileMongos\n\t/go/src/github.com/percona/percona-server-mongodb-operator/pkg/controller/perconaservermongodb/psmdb_controller.go:1223\ngithub.com/percona/percona-server-mongodb-operator/pkg/controller/perconaservermongodb.(*ReconcilePerconaServerMongoDB).Reconcile\n\t/go/src/github.com/percona/percona-server-mongodb-operator/pkg/controller/perconaservermongodb/psmdb_controller.go:423\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Reconcile\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.20.4/pkg/internal/controller/controller.go:119\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).reconcileHandler\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.20.4/pkg/internal/controller/controller.go:334\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).processNextWorkItem\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.20.4/pkg/internal/controller/controller.go:294\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Start.func2.2\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.20.4/pkg/internal/controller/controller.go:255\nruntime.goexit\n\t/usr/local/go/src/runtime/asm_amd64.s:1700\nreconcile mongos\ngithub.com/percona/percona-server-mongodb-operator/pkg/controller/perconaservermongodb.(*ReconcilePerconaServerMongoDB).Reconcile\n\t/go/src/github.com/percona/percona-server-mongodb-operator/pkg/controller/perconaservermongodb/psmdb_controller.go:425\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Reconcile\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.20.4/pkg/internal/controller/controller.go:119\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).reconcileHandler\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.20.4/pkg/internal/controller/controller.go:334\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).processNextWorkItem\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.20.4/pkg/internal/controller/controller.go:294\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Start.func2.2\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.20.4/pkg/internal/controller/controller.go:255\nruntime.goexit\n\t/usr/local/go/src/runtime/asm_amd64.s:1700"} sigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).reconcileHandler /go/pkg/mod/sigs.k8s.io/controller-runtime@v0.20.4/pkg/internal/controller/controller.go:347 sigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).processNextWorkItem /go/pkg/mod/sigs.k8s.io/controller-runtime@v0.20.4/pkg/internal/controller/controller.go:294 sigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Start.func2.2 /go/pkg/mod/sigs.k8s.io/controller-runtime@v0.20.4/pkg/internal/controller/controller.go:255 2025-05-19T02:53:26.770Z DEBUG Cluster is not ready {"controller": "psmdb-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDB", "PerconaServerMongoDB": {"name":"some-name","namespace":"balancer-13428"}, "namespace": "balancer-13428", "name": "some-name", "reconcileID": "b4c60f81-2c50-4880-96c7-9611a3859131", "upgradeInProgress": false, "replsetsReady": 4, "clusterState": "ready"} 2025-05-19T02:53:26.771Z INFO Cluster state changed {"controller": "psmdb-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDB", 
"PerconaServerMongoDB": {"name":"some-name","namespace":"balancer-13428"}, "namespace": "balancer-13428", "name": "some-name", "reconcileID": "b4c60f81-2c50-4880-96c7-9611a3859131", "previous": "error", "current": "initializing"} 2025-05-19T02:53:31.691Z DEBUG Cluster is not ready {"controller": "psmdb-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDB", "PerconaServerMongoDB": {"name":"some-name","namespace":"balancer-13428"}, "namespace": "balancer-13428", "name": "some-name", "reconcileID": "ec3baafc-5c41-4af2-8d7f-cbf70b83f765", "upgradeInProgress": false, "replsetsReady": 4, "clusterState": "ready"} 2025-05-19T02:53:36.644Z DEBUG Cluster is not ready {"controller": "psmdb-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDB", "PerconaServerMongoDB": {"name":"some-name","namespace":"balancer-13428"}, "namespace": "balancer-13428", "name": "some-name", "reconcileID": "784323f3-7bb7-4b8a-ba24-b9c333658aac", "upgradeInProgress": false, "replsetsReady": 4, "clusterState": "ready"} 2025-05-19T02:53:43.068Z DEBUG Cluster is not ready {"controller": "psmdb-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDB", "PerconaServerMongoDB": {"name":"some-name","namespace":"balancer-13428"}, "namespace": "balancer-13428", "name": "some-name", "reconcileID": "f84728fd-9564-4355-ab3f-d363b8bf595f", "upgradeInProgress": false, "replsetsReady": 4, "clusterState": "ready"} 2025-05-19T02:53:47.586Z DEBUG Cluster is not ready {"controller": "psmdb-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDB", "PerconaServerMongoDB": {"name":"some-name","namespace":"balancer-13428"}, "namespace": "balancer-13428", "name": "some-name", "reconcileID": "6fff2f39-2309-47c8-a0d7-4b8bd02756ef", "upgradeInProgress": false, "replsetsReady": 4, "clusterState": "ready"} 2025-05-19T02:53:52.777Z INFO Cluster state changed {"controller": "psmdb-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDB", "PerconaServerMongoDB": {"name":"some-name","namespace":"balancer-13428"}, "namespace": "balancer-13428", "name": "some-name", "reconcileID": "b097ee0e-174e-44cd-bdf8-d7ecaa0c6b38", "previous": "initializing", "current": "ready"} apiVersion: psmdb.percona.com/v1 kind: PerconaServerMongoDBRestore metadata: annotations: kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"psmdb.percona.com/v1","kind":"PerconaServerMongoDBRestore","metadata":{"annotations":{},"name":"restore-backup-minio-1","namespace":"balancer-13428"},"spec":{"backupName":"backup-minio-1","clusterName":"some-name"}} creationTimestamp: "2025-05-19T02:50:45Z" generation: 1 name: restore-backup-minio-1 namespace: balancer-13428 resourceVersion: "1747623179811839005" uid: e37ac0aa-7cac-4001-9f2f-306d8a828ff1 spec: backupName: backup-minio-1 clusterName: some-name status: completed: "2025-05-19T02:52:57Z" pbmName: "2025-05-19T02:51:06.537807358Z" state: ready Restore object restore-backup-minio-1 is in ready state. something went wrong with operator or kubernetes cluster