Log: /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1917/e2e-tests/logs/demand-backup-incremental.log grep: warning: stray \ before - Warning: version difference between client (1.35) and server (1.31) exceeds the supported minor version skew of +/-1 Warning: version difference between client (1.35) and server (1.31) exceeds the supported minor version skew of +/-1 Warning: version difference between client (1.35) and server (1.31) exceeds the supported minor version skew of +/-1 + create_infra demand-backup-incremental-6143 + local ns=demand-backup-incremental-6143 + [[ 1 == 1 ]] + delete_crd + desc 'get and delete old CRDs and RBAC' + set +o xtrace ----------------------------------------------------------------------------------- get and delete old CRDs and RBAC ----------------------------------------------------------------------------------- + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1917/deploy/crd.yaml --ignore-not-found --wait=false ++ mktemp + local LAST_OUT=/tmp/tmp.Pa2nAPaOXR ++ mktemp + local LAST_ERR=/tmp/tmp.BjclnHHSUk + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1917/deploy/crd.yaml --ignore-not-found --wait=false + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Pa2nAPaOXR customresourcedefinition.apiextensions.k8s.io "perconaservermongodbbackups.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbrestores.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbs.psmdb.percona.com" deleted + cat /tmp/tmp.BjclnHHSUk + rm /tmp/tmp.Pa2nAPaOXR /tmp/tmp.BjclnHHSUk + return 0 ++ yq eval .metadata.name /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1917/deploy/crd.yaml ++ grep -v '\-\-\-' grep: warning: stray \ before - grep: warning: stray \ before - + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + kubectl get perconaservermongodbbackups.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbbackups.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' + kubectl patch perconaservermongodbbackups.psmdb.percona.com -n demand-backup-incremental-18086 backup-aws-s3 --type=merge -p '{"metadata":{"finalizers":[]}}' perconaservermongodbbackup.psmdb.percona.com/backup-aws-s3 patched + kubectl patch perconaservermongodbbackups.psmdb.percona.com -n demand-backup-incremental-18086 backup-azure-blob --type=merge -p '{"metadata":{"finalizers":[]}}' perconaservermongodbbackup.psmdb.percona.com/backup-azure-blob patched + kubectl patch perconaservermongodbbackups.psmdb.percona.com -n demand-backup-incremental-18086 backup-minio-arbiter-nv --type=merge -p '{"metadata":{"finalizers":[]}}' perconaservermongodbbackup.psmdb.percona.com/backup-minio-arbiter-nv patched + kubectl patch perconaservermongodbbackups.psmdb.percona.com -n demand-backup-incremental-18086 backup-minio-not-base --type=merge -p '{"metadata":{"finalizers":[]}}' Error from server (NotFound): perconaservermongodbbackups.psmdb.percona.com "backup-minio-not-base" not found + : + kubectl_bin wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.68QrKictPj ++ mktemp + local LAST_ERR=/tmp/tmp.WrGeQ3umk3 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd 
perconaservermongodbbackups.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.68QrKictPj customresourcedefinition.apiextensions.k8s.io/perconaservermongodbbackups.psmdb.percona.com condition met + cat /tmp/tmp.WrGeQ3umk3 + rm /tmp/tmp.68QrKictPj /tmp/tmp.WrGeQ3umk3 + return 0 + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + kubectl get perconaservermongodbrestores.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbrestores.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbrestores" + kubectl patch perconaservermongodbrestores.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbrestores" + : + kubectl_bin wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.W9cHftOuqn ++ mktemp + local LAST_ERR=/tmp/tmp.U6BNqd8CSv + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.W9cHftOuqn + cat /tmp/tmp.U6BNqd8CSv + rm /tmp/tmp.W9cHftOuqn /tmp/tmp.U6BNqd8CSv + return 0 + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + kubectl get perconaservermongodbs.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbs.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbs" + kubectl patch perconaservermongodbs.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbs" + : + kubectl_bin wait --for=delete crd perconaservermongodbs.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.14wFL6iTWu ++ mktemp + local LAST_ERR=/tmp/tmp.gdxB7UGkcN + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbs.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.14wFL6iTWu + cat /tmp/tmp.gdxB7UGkcN + rm /tmp/tmp.14wFL6iTWu /tmp/tmp.gdxB7UGkcN + return 0 + local rbac_yaml=rbac.yaml + '[' -n psmdb-operator ']' + rbac_yaml=cw-rbac.yaml + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1917/deploy/cw-rbac.yaml --ignore-not-found ++ mktemp + local LAST_OUT=/tmp/tmp.kfRDgMVsBo ++ mktemp + local LAST_ERR=/tmp/tmp.3R09CFaE7p + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1917/deploy/cw-rbac.yaml --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.kfRDgMVsBo clusterrole.rbac.authorization.k8s.io "percona-server-mongodb-operator" deleted clusterrolebinding.rbac.authorization.k8s.io "service-account-percona-server-mongodb-operator" deleted + cat /tmp/tmp.3R09CFaE7p + rm /tmp/tmp.kfRDgMVsBo /tmp/tmp.3R09CFaE7p + return 0 + check_crd_for_deletion PR-1917-02b4bc7d + local git_tag=PR-1917-02b4bc7d ++ curl -s 
https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/PR-1917-02b4bc7d/deploy/crd.yaml ++ yq eval .metadata.name ++ /usr/sbin/sed s/---//g ++ /usr/sbin/sed ':a;N;$!ba;s/\n/ /g' + for crd_name in $(curl -s https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/${git_tag}/deploy/crd.yaml | yq eval '.metadata.name' | $sed 's/---//g' | $sed ':a;N;$!ba;s/\n/ /g') ++ kubectl_bin get crd/null -o 'jsonpath={.status.conditions[-1].type}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.vkjJqIi8f5 +++ mktemp ++ local LAST_ERR=/tmp/tmp.aQFWm6WcHd ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}' ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.vkjJqIi8f5 ++ cat /tmp/tmp.aQFWm6WcHd Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ sleep 0 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}' ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.vkjJqIi8f5 ++ cat /tmp/tmp.aQFWm6WcHd Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ sleep 4 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}' ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.vkjJqIi8f5 ++ cat /tmp/tmp.aQFWm6WcHd Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ sleep 8 ++ cat /tmp/tmp.vkjJqIi8f5 ++ cat /tmp/tmp.aQFWm6WcHd Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ rm /tmp/tmp.vkjJqIi8f5 /tmp/tmp.aQFWm6WcHd ++ return 1 + [[ '' == \T\e\r\m\i\n\a\t\i\n\g ]] + '[' -n psmdb-operator ']' + create_namespace psmdb-operator + local namespace=psmdb-operator + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + destroy_chaos_mesh ++ helm list --all-namespaces --filter chaos-mesh ++ tail -n1 ++ awk '-F ' '{print $2}' ++ sed s/NAMESPACE// + local chaos_mesh_ns= + desc 'destroy chaos-mesh' + set +o xtrace ----------------------------------------------------------------------------------- destroy chaos-mesh ----------------------------------------------------------------------------------- + '[' -n '' ']' ++ kubectl get MutatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete MutatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep validate-auth ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl api-resources ++ grep chaos-mesh ++ awk '{print $1}' ++ kubectl get crd ++ grep chaos-mesh.org ++ awk '{print $1}' + timeout 30 kubectl delete crd error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrolebinding ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrolebinding error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrole ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrole error: resource(s) were provided, but no name was 
specified + : + desc 'cleaned up all old namespaces' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up all old namespaces ----------------------------------------------------------------------------------- + kubectl_bin get ns + awk '{print$1}' + egrep -v '^kube-|^default|Terminating|psmdb-operator|openshift|^gke-|^gmp-|^NAME' + '[' -n '' ']' + xargs kubectl delete ns + desc 'cleaned up old namespaces psmdb-operator' ++ mktemp + set +o xtrace ----------------------------------------------------------------------------------- cleaned up old namespaces psmdb-operator ----------------------------------------------------------------------------------- + kubectl_bin delete namespace psmdb-operator --ignore-not-found ++ mktemp + local LAST_OUT=/tmp/tmp.1GUJaFIq8j ++ mktemp + local LAST_OUT=/tmp/tmp.h6kxQzVaFY egrep: warning: egrep is obsolescent; using grep -E ++ mktemp + local LAST_ERR=/tmp/tmp.8LMLCf5SpX + local exit_status=0 + local timeout=4 ++ seq 0 2 + local LAST_ERR=/tmp/tmp.BxYY3QjpDp + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get ns + for i in $(seq 0 2) + set +e + kubectl delete namespace psmdb-operator --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.1GUJaFIq8j + cat /tmp/tmp.8LMLCf5SpX + rm /tmp/tmp.1GUJaFIq8j /tmp/tmp.8LMLCf5SpX + return 0 namespace "demand-backup-incremental-18086" deleted + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.h6kxQzVaFY namespace "psmdb-operator" deleted + cat /tmp/tmp.BxYY3QjpDp + rm /tmp/tmp.h6kxQzVaFY /tmp/tmp.BxYY3QjpDp + return 0 + kubectl_bin wait --for=delete namespace psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.bLuTd8TeFk ++ mktemp + local LAST_ERR=/tmp/tmp.6ljQyUmNEN + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete namespace psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.bLuTd8TeFk + cat /tmp/tmp.6ljQyUmNEN + rm /tmp/tmp.bLuTd8TeFk /tmp/tmp.6ljQyUmNEN + return 0 + desc 'create namespace psmdb-operator' + set +o xtrace ----------------------------------------------------------------------------------- create namespace psmdb-operator ----------------------------------------------------------------------------------- + kubectl_bin create namespace psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.iYZ76dHxBY ++ mktemp + local LAST_ERR=/tmp/tmp.LbyDBZpFNh + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl create namespace psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.iYZ76dHxBY namespace/psmdb-operator created + cat /tmp/tmp.LbyDBZpFNh + rm /tmp/tmp.iYZ76dHxBY /tmp/tmp.LbyDBZpFNh + return 0 + set_kube_ctx psmdb-operator + local namespace=psmdb-operator ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.wYU9pMJFcl +++ mktemp ++ local LAST_ERR=/tmp/tmp.5ULMBMBc1z ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.wYU9pMJFcl ++ cat /tmp/tmp.5ULMBMBc1z ++ rm /tmp/tmp.wYU9pMJFcl /tmp/tmp.5ULMBMBc1z ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-1917-02b4bc7d-42-cluster1 --namespace=psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.nBffDw3WcM ++ mktemp + local 
LAST_ERR=/tmp/tmp.bmoCA11lWA + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-1917-02b4bc7d-42-cluster1 --namespace=psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.nBffDw3WcM Context "gke_cloud-dev-112233_us-central1-a_jen-psmdb-1917-02b4bc7d-42-cluster1" modified. + cat /tmp/tmp.bmoCA11lWA + rm /tmp/tmp.nBffDw3WcM /tmp/tmp.bmoCA11lWA + return 0 + deploy_operator + desc 'start PSMDB operator: perconalab/percona-server-mongodb-operator:PR-1917-02b4bc7d' + set +o xtrace ----------------------------------------------------------------------------------- start PSMDB operator: perconalab/percona-server-mongodb-operator:PR-1917-02b4bc7d ----------------------------------------------------------------------------------- + local cr_file + '[' -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1917/e2e-tests/demand-backup-incremental/conf/crd.yaml ']' + cr_file=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1917/deploy/crd.yaml + kubectl_bin apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1917/deploy/crd.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.ldfRXOU1tf ++ mktemp + local LAST_ERR=/tmp/tmp.QPqVZhMmJb + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1917/deploy/crd.yaml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.ldfRXOU1tf customresourcedefinition.apiextensions.k8s.io/perconaservermongodbbackups.psmdb.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaservermongodbrestores.psmdb.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaservermongodbs.psmdb.percona.com serverside-applied + cat /tmp/tmp.QPqVZhMmJb + rm /tmp/tmp.ldfRXOU1tf /tmp/tmp.QPqVZhMmJb + return 0 + '[' -n psmdb-operator ']' + apply_rbac cw-rbac + local operator_namespace=psmdb-operator + local rbac=cw-rbac + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1917/deploy/cw-rbac.yaml + sed -e 's^namespace: .*^namespace: psmdb-operator^' + kubectl_bin apply -n psmdb-operator -f - ++ mktemp + local LAST_OUT=/tmp/tmp.xq3wRAZtZL ++ mktemp + local LAST_ERR=/tmp/tmp.ZWfr5b1RB2 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -n psmdb-operator -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.xq3wRAZtZL clusterrole.rbac.authorization.k8s.io/percona-server-mongodb-operator created serviceaccount/percona-server-mongodb-operator created clusterrolebinding.rbac.authorization.k8s.io/service-account-percona-server-mongodb-operator created + cat /tmp/tmp.ZWfr5b1RB2 + rm /tmp/tmp.xq3wRAZtZL /tmp/tmp.ZWfr5b1RB2 + return 0 + yq eval ' (.spec.template.spec.containers[].image = "perconalab/percona-server-mongodb-operator:PR-1917-02b4bc7d") | ((.. | select(.[] == "DISABLE_TELEMETRY")) |= .value="true") | ((.. 
| select(.[] == "LOG_LEVEL")) |= .value="DEBUG")' /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1917/deploy/cw-operator.yaml + kubectl_bin apply -n psmdb-operator -f - ++ mktemp + local LAST_OUT=/tmp/tmp.KTDkctWFHy ++ mktemp + local LAST_ERR=/tmp/tmp.u70RUJPP2b + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -n psmdb-operator -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.KTDkctWFHy deployment.apps/percona-server-mongodb-operator created + cat /tmp/tmp.u70RUJPP2b + rm /tmp/tmp.KTDkctWFHy /tmp/tmp.u70RUJPP2b + return 0 + sleep 20 ++ get_operator_pod ++ kubectl_bin get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.n046DDDS61 +++ mktemp ++ local LAST_ERR=/tmp/tmp.Rwl7ac5N3O ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.n046DDDS61 ++ cat /tmp/tmp.Rwl7ac5N3O ++ rm /tmp/tmp.n046DDDS61 /tmp/tmp.Rwl7ac5N3O ++ return 0 + wait_operator_pod percona-server-mongodb-operator-749bfc94f5-nfj85 + local pod=percona-server-mongodb-operator-749bfc94f5-nfj85 + set +o xtrace waiting for pod/percona-server-mongodb-operator-749bfc94f5-nfj85 to be ready.OK + echo 'Print operator info from log' Print operator info from log + grep 'Manager starting up' ++ get_operator_pod ++ kubectl_bin get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.JHcjj1uZx9 +++ mktemp ++ local LAST_ERR=/tmp/tmp.95d77KBfjk ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.JHcjj1uZx9 ++ cat /tmp/tmp.95d77KBfjk ++ rm /tmp/tmp.JHcjj1uZx9 /tmp/tmp.95d77KBfjk ++ return 0 + kubectl_bin logs -n psmdb-operator percona-server-mongodb-operator-749bfc94f5-nfj85 ++ mktemp + local LAST_OUT=/tmp/tmp.zFbVAnNz5d ++ mktemp + local LAST_ERR=/tmp/tmp.TnVCZSc1ls + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl logs -n psmdb-operator percona-server-mongodb-operator-749bfc94f5-nfj85 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.zFbVAnNz5d + cat /tmp/tmp.TnVCZSc1ls + rm /tmp/tmp.zFbVAnNz5d /tmp/tmp.TnVCZSc1ls + return 0 2025-12-18T10:59:38.611Z INFO setup Manager starting up {"gitCommit": "02b4bc7dc98d6ade6d4d249d26a8ac1fdd611e8b", "gitBranch": "PR-1917-02b4bc7d", "buildTime": "", "goVersion": "go1.25.5", "os": "linux", "arch": "amd64"} + create_namespace demand-backup-incremental-6143 + local namespace=demand-backup-incremental-6143 + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + destroy_chaos_mesh ++ helm list --all-namespaces --filter chaos-mesh ++ tail -n1 ++ awk '-F ' '{print $2}' ++ sed s/NAMESPACE// + local chaos_mesh_ns= + desc 'destroy chaos-mesh' + set +o xtrace ----------------------------------------------------------------------------------- destroy chaos-mesh ----------------------------------------------------------------------------------- + '[' -n '' ']' ++ kubectl get MutatingWebhookConfiguration ++ grep 
chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete MutatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep validate-auth ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl api-resources ++ grep chaos-mesh ++ awk '{print $1}' ++ kubectl get crd ++ awk '{print $1}' ++ grep chaos-mesh.org + timeout 30 kubectl delete crd error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrolebinding ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrolebinding error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrole ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrole error: resource(s) were provided, but no name was specified + : + desc 'cleaned up all old namespaces' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up all old namespaces ----------------------------------------------------------------------------------- + kubectl_bin get ns + egrep -v '^kube-|^default|Terminating|psmdb-operator|openshift|^gke-|^gmp-|^NAME' + awk '{print$1}' + '[' -n '' ']' + desc 'cleaned up old namespaces demand-backup-incremental-6143' + xargs kubectl delete ns + set +o xtrace ----------------------------------------------------------------------------------- cleaned up old namespaces demand-backup-incremental-6143 ----------------------------------------------------------------------------------- + kubectl_bin delete namespace demand-backup-incremental-6143 --ignore-not-found ++ mktemp ++ mktemp + local LAST_OUT=/tmp/tmp.aAADuZwoWH egrep: warning: egrep is obsolescent; using grep -E ++ mktemp + local LAST_ERR=/tmp/tmp.p7Nn4zfiZS + local exit_status=0 + local timeout=4 + local LAST_OUT=/tmp/tmp.JdEhEfsHk3 ++ seq 0 2 ++ mktemp + for i in $(seq 0 2) + set +e + kubectl get ns + local LAST_ERR=/tmp/tmp.zCT6gZh8Pr + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete namespace demand-backup-incremental-6143 --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.aAADuZwoWH + cat /tmp/tmp.p7Nn4zfiZS + rm /tmp/tmp.aAADuZwoWH /tmp/tmp.p7Nn4zfiZS + return 0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.JdEhEfsHk3 + cat /tmp/tmp.zCT6gZh8Pr + rm /tmp/tmp.JdEhEfsHk3 /tmp/tmp.zCT6gZh8Pr + return 0 + kubectl_bin wait --for=delete namespace demand-backup-incremental-6143 ++ mktemp + local LAST_OUT=/tmp/tmp.xYd4rI2Vce ++ mktemp + local LAST_ERR=/tmp/tmp.0nU1XKev6n + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete namespace demand-backup-incremental-6143 error: resource(s) were provided, but no name was specified + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.xYd4rI2Vce + cat /tmp/tmp.0nU1XKev6n + rm /tmp/tmp.xYd4rI2Vce /tmp/tmp.0nU1XKev6n + return 0 + desc 'create namespace demand-backup-incremental-6143' + set +o xtrace ----------------------------------------------------------------------------------- create namespace demand-backup-incremental-6143 
----------------------------------------------------------------------------------- + kubectl_bin create namespace demand-backup-incremental-6143 ++ mktemp + local LAST_OUT=/tmp/tmp.KrVl3eleDW ++ mktemp + local LAST_ERR=/tmp/tmp.9gNyku2RDJ + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl create namespace demand-backup-incremental-6143 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.KrVl3eleDW namespace/demand-backup-incremental-6143 created + cat /tmp/tmp.9gNyku2RDJ + rm /tmp/tmp.KrVl3eleDW /tmp/tmp.9gNyku2RDJ + return 0 + set_kube_ctx demand-backup-incremental-6143 + local namespace=demand-backup-incremental-6143 ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.gkF6yLJbxc +++ mktemp ++ local LAST_ERR=/tmp/tmp.A2CYS40tKE ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.gkF6yLJbxc ++ cat /tmp/tmp.A2CYS40tKE ++ rm /tmp/tmp.gkF6yLJbxc /tmp/tmp.A2CYS40tKE ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-1917-02b4bc7d-42-cluster1 --namespace=demand-backup-incremental-6143 ++ mktemp + local LAST_OUT=/tmp/tmp.VLfvZSTSTP ++ mktemp + local LAST_ERR=/tmp/tmp.NjOXtMWo94 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-1917-02b4bc7d-42-cluster1 --namespace=demand-backup-incremental-6143 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.VLfvZSTSTP Context "gke_cloud-dev-112233_us-central1-a_jen-psmdb-1917-02b4bc7d-42-cluster1" modified. + cat /tmp/tmp.NjOXtMWo94 + rm /tmp/tmp.VLfvZSTSTP /tmp/tmp.NjOXtMWo94 + return 0 + deploy_minio + desc 'install Minio' + set +o xtrace ----------------------------------------------------------------------------------- install Minio ----------------------------------------------------------------------------------- + helm uninstall minio-service Error: uninstall: Release not loaded: minio-service: release: not found + : + helm repo remove minio "minio" has been removed from your repositories + helm repo add minio https://charts.min.io/ "minio" has been added to your repositories + retry 10 60 helm install minio-service --version 5.4.0 --set replicas=1 --set mode=standalone --set resources.requests.memory=256Mi --set rootUser=rootuser --set rootPassword=rootpass123 --set 'users[0].accessKey=some-access-key' --set 'users[0].secretKey=some-secret-key' --set 'users[0].policy=consoleAdmin' --set service.type=ClusterIP --set configPathmc=/tmp/.minio/ --set persistence.size=2G --set securityContext.enabled=false minio/minio + local max=10 + local delay=60 + shift 2 + local n=1 + helm install minio-service --version 5.4.0 --set replicas=1 --set mode=standalone --set resources.requests.memory=256Mi --set rootUser=rootuser --set rootPassword=rootpass123 --set 'users[0].accessKey=some-access-key' --set 'users[0].secretKey=some-secret-key' --set 'users[0].policy=consoleAdmin' --set service.type=ClusterIP --set configPathmc=/tmp/.minio/ --set persistence.size=2G --set securityContext.enabled=false minio/minio NAME: minio-service LAST DEPLOYED: Thu Dec 18 11:00:14 2025 NAMESPACE: demand-backup-incremental-6143 STATUS: deployed REVISION: 1 TEST SUITE: None NOTES: MinIO can be accessed via port 9000 on the following DNS name from within your cluster: 
minio-service.demand-backup-incremental-6143.cluster.local To access MinIO from localhost, run the below commands: 1. export POD_NAME=$(kubectl get pods --namespace demand-backup-incremental-6143 -l "release=minio-service" -o jsonpath="{.items[0].metadata.name}") 2. kubectl port-forward $POD_NAME 9000 --namespace demand-backup-incremental-6143 Read more about port forwarding here: http://kubernetes.io/docs/user-guide/kubectl/kubectl_port-forward/ You can now access MinIO server on http://localhost:9000. Follow the below steps to connect to MinIO server with mc client: 1. Download the MinIO mc client - https://min.io/docs/minio/linux/reference/minio-mc.html#quickstart 2. export MC_HOST_minio-service-local=http://$(kubectl get secret --namespace demand-backup-incremental-6143 minio-service -o jsonpath="{.data.rootUser}" | base64 --decode):$(kubectl get secret --namespace demand-backup-incremental-6143 minio-service -o jsonpath="{.data.rootPassword}" | base64 --decode)@localhost:9000 3. mc ls minio-service-local ++ kubectl_bin get pods --selector=release=minio-service -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.mJ2AepNcn2 +++ mktemp ++ local LAST_ERR=/tmp/tmp.QraAnBRv6Z ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=release=minio-service -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.mJ2AepNcn2 ++ cat /tmp/tmp.QraAnBRv6Z ++ rm /tmp/tmp.mJ2AepNcn2 /tmp/tmp.QraAnBRv6Z ++ return 0 + MINIO_POD=minio-service-d9589b474-5rhrc + wait_pod minio-service-d9589b474-5rhrc + local pod=minio-service-d9589b474-5rhrc + set +o xtrace waiting for pod/minio-service-d9589b474-5rhrc to be ready.OK + '[' -n psmdb-operator ']' + kubectl_bin create svc -n psmdb-operator externalname minio-service --external-name=minio-service.demand-backup-incremental-6143.svc.cluster.local --tcp=9000 ++ mktemp + local LAST_OUT=/tmp/tmp.HYNljLdNyZ ++ mktemp + local LAST_ERR=/tmp/tmp.r8HPYHHslc + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl create svc -n psmdb-operator externalname minio-service --external-name=minio-service.demand-backup-incremental-6143.svc.cluster.local --tcp=9000 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.HYNljLdNyZ service/minio-service created + cat /tmp/tmp.r8HPYHHslc + rm /tmp/tmp.HYNljLdNyZ /tmp/tmp.r8HPYHHslc + return 0 + create_minio_bucket operator-testing + local bucket=operator-testing + kubectl_bin run -i --rm aws-cli --image=perconalab/awscli --restart=Never -- bash -c 'AWS_ACCESS_KEY_ID=some-access-key AWS_SECRET_ACCESS_KEY=some-secret-key AWS_DEFAULT_REGION=us-east-1 /usr/bin/aws --endpoint-url http://minio-service:9000 s3 mb s3://operator-testing' ++ mktemp + local LAST_OUT=/tmp/tmp.xNvyLaM7MF ++ mktemp + local LAST_ERR=/tmp/tmp.X3KGk3UnGt + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl run -i --rm aws-cli --image=perconalab/awscli --restart=Never -- bash -c 'AWS_ACCESS_KEY_ID=some-access-key AWS_SECRET_ACCESS_KEY=some-secret-key AWS_DEFAULT_REGION=us-east-1 /usr/bin/aws --endpoint-url http://minio-service:9000 s3 mb s3://operator-testing' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.xNvyLaM7MF make_bucket: operator-testing pod "aws-cli" deleted from demand-backup-incremental-6143 namespace + cat /tmp/tmp.X3KGk3UnGt All commands and output from this session will be recorded in 
container logs, including credentials and sensitive information passed through the command prompt. If you don't see a command prompt, try pressing enter. warning: couldn't attach to pod/aws-cli, falling back to streaming logs: Internal error occurred: unable to upgrade connection: container aws-cli not found in pod aws-cli_demand-backup-incremental-6143 + rm /tmp/tmp.xNvyLaM7MF /tmp/tmp.X3KGk3UnGt + return 0 + apply_s3_storage_secrets + desc 'create secrets for cloud storages' + set +o xtrace ----------------------------------------------------------------------------------- create secrets for cloud storages ----------------------------------------------------------------------------------- + '[' -z '' ']' + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1917/e2e-tests/conf/minio-secret.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1917/e2e-tests/conf/cloud-secret.yml ++ mktemp + local LAST_OUT=/tmp/tmp.AOEbJN6Aiq ++ mktemp + local LAST_ERR=/tmp/tmp.CuUAHEQeKC + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1917/e2e-tests/conf/minio-secret.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1917/e2e-tests/conf/cloud-secret.yml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.AOEbJN6Aiq secret/minio-secret created secret/aws-s3-secret created secret/gcp-cs-secret created secret/azure-secret created secret/gcp-cs-sa-key-secret created + cat /tmp/tmp.CuUAHEQeKC + rm /tmp/tmp.AOEbJN6Aiq /tmp/tmp.CuUAHEQeKC + return 0 + desc 'Testing on not sharded cluster' + set +o xtrace ----------------------------------------------------------------------------------- Testing on not sharded cluster ----------------------------------------------------------------------------------- + log 'Creating PSMDB cluster' + set +o xtrace [2025-12-18T11:00:53+0000] Creating PSMDB cluster + cluster=some-name + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1917/e2e-tests/demand-backup-incremental/conf/secrets.yml ++ mktemp + local LAST_OUT=/tmp/tmp.H1sYW9BNHj ++ mktemp + local LAST_ERR=/tmp/tmp.shGtZViUU7 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1917/e2e-tests/demand-backup-incremental/conf/secrets.yml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.H1sYW9BNHj secret/some-users created + cat /tmp/tmp.shGtZViUU7 + rm /tmp/tmp.H1sYW9BNHj /tmp/tmp.shGtZViUU7 + return 0 + apply_cluster /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1917/e2e-tests/demand-backup-incremental/conf/some-name.yml + '[' -z '' ']' + cat_config /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1917/e2e-tests/demand-backup-incremental/conf/some-name.yml + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1917/e2e-tests/demand-backup-incremental/conf/some-name.yml + kubectl_bin apply -f - + yq eval '(.spec | select(.image == null)).image = "perconalab/percona-server-mongodb-operator:main-mongod8.0"' + yq eval '(.spec | select(has("pmm"))).pmm.image = "percona/pmm-client:2.44.1-1"' + yq eval '(.spec | select(has("initImage"))).initImage = "perconalab/percona-server-mongodb-operator:PR-1917-02b4bc7d"' + yq eval '.spec.upgradeOptions.apply="Never"' + yq eval '(.spec | select(has("backup"))).backup.image = "perconalab/percona-server-mongodb-operator:main-backup"' ++ mktemp + local LAST_OUT=/tmp/tmp.FmPN1JVXDk ++ mktemp + local 
LAST_ERR=/tmp/tmp.d4vjZTnB8n + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.FmPN1JVXDk perconaservermongodb.psmdb.percona.com/some-name created + cat /tmp/tmp.d4vjZTnB8n + rm /tmp/tmp.FmPN1JVXDk /tmp/tmp.d4vjZTnB8n + return 0 + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1917/e2e-tests/conf/client_with_tls.yml ++ mktemp + local LAST_OUT=/tmp/tmp.d6X03OH7Hx ++ mktemp + local LAST_ERR=/tmp/tmp.tRoS0FAqqV + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1917/e2e-tests/conf/client_with_tls.yml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.d6X03OH7Hx deployment.apps/psmdb-client created + cat /tmp/tmp.tRoS0FAqqV + rm /tmp/tmp.d6X03OH7Hx /tmp/tmp.tRoS0FAqqV + return 0 + log 'check if all pods started' + set +o xtrace [2025-12-18T11:00:57+0000] check if all pods started + wait_for_running some-name-rs0 3 + local name=some-name-rs0 + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=some-name ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod some-name-rs0-0 + local pod=some-name-rs0-0 + set +o xtrace waiting for pod/some-name-rs0-0 to be ready........OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod some-name-rs0-1 + local pod=some-name-rs0-1 + set +o xtrace waiting for pod/some-name-rs0-1 to be ready.......OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.XgATLzt3CP +++ mktemp ++ local LAST_ERR=/tmp/tmp.IRvKdUZgEZ ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.XgATLzt3CP ++ cat /tmp/tmp.IRvKdUZgEZ ++ rm /tmp/tmp.XgATLzt3CP /tmp/tmp.IRvKdUZgEZ ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-rs0-2 + local pod=some-name-rs0-2 + set +o xtrace waiting for pod/some-name-rs0-2 to be ready.....OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.x12GV3o0v8 +++ mktemp ++ local LAST_ERR=/tmp/tmp.o1jAM9s92t ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.x12GV3o0v8 ++ cat /tmp/tmp.o1jAM9s92t ++ rm /tmp/tmp.x12GV3o0v8 /tmp/tmp.o1jAM9s92t ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ppcBtToKPc +++ mktemp ++ local LAST_ERR=/tmp/tmp.rxVKqZidpJ ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.ppcBtToKPc ++ cat /tmp/tmp.rxVKqZidpJ ++ rm /tmp/tmp.ppcBtToKPc /tmp/tmp.rxVKqZidpJ ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == 
\t\r\u\e ]] + set +x Waiting for cluster readyness.. + wait_cluster_consistency some-name + local cluster_name=some-name + local wait_time=32 + retry=0 + sleep 7 + echo -n 'waiting for cluster readyness' waiting for cluster readyness++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.nTHbaaoHOa +++ mktemp ++ local LAST_ERR=/tmp/tmp.0mRJ74hO6h ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.nTHbaaoHOa ++ cat /tmp/tmp.0mRJ74hO6h ++ rm /tmp/tmp.nTHbaaoHOa /tmp/tmp.0mRJ74hO6h ++ return 0 + [[ ready == \r\e\a\d\y ]] + echo .OK .OK + log 'writing test data' + set +o xtrace [2025-12-18T11:02:31+0000] writing test data + run_mongo 'db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})' userAdmin:userAdmin123456@some-name-rs0.demand-backup-incremental-6143 + local 'command=db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})' + local uri=userAdmin:userAdmin123456@some-name-rs0.demand-backup-incremental-6143 + local driver=mongodb+srv + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.JSlqwwVK5Y +++ mktemp ++ local LAST_ERR=/tmp/tmp.YZWvVpJtM8 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.JSlqwwVK5Y ++ cat /tmp/tmp.YZWvVpJtM8 ++ rm /tmp/tmp.JSlqwwVK5Y /tmp/tmp.YZWvVpJtM8 ++ return 0 + local client_container=psmdb-client-8f86f7874-r95mz + local mongo_flag= + [[ userAdmin:userAdmin123456@some-name-rs0.demand-backup-incremental-6143 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-8f86f7874-r95mz -- bash -c 'printf '\''db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})\n'\'' | mongo mongodb+srv://userAdmin:userAdmin123456@some-name-rs0.demand-backup-incremental-6143.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.bvBaJPAF96 ++ mktemp + local LAST_ERR=/tmp/tmp.d1EmpSYM9w + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-8f86f7874-r95mz -- bash -c 'printf '\''db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})\n'\'' | mongo mongodb+srv://userAdmin:userAdmin123456@some-name-rs0.demand-backup-incremental-6143.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.bvBaJPAF96 Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-rs0-0.some-name-rs0.demand-backup-incremental-6143.svc.cluster.local:27017,some-name-rs0-1.some-name-rs0.demand-backup-incremental-6143.svc.cluster.local:27017,some-name-rs0-2.some-name-rs0.demand-backup-incremental-6143.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("cc023746-bb0c-48af-a242-ea60ab3cceb9") } Percona Server for MongoDB server version: v8.0.16-5 WARNING: shell and server versions do not match Successfully added user: { "user" : "myApp", "roles" : [ { "db" : "myApp", "role" : "readWrite" } ] } bye + cat 
/tmp/tmp.d1EmpSYM9w + rm /tmp/tmp.bvBaJPAF96 /tmp/tmp.d1EmpSYM9w + return 0 + sleep 1 + run_mongo 'use myApp\n db.test.insert({ x: 100500 })' myApp:myPass@some-name-rs0.demand-backup-incremental-6143 + local 'command=use myApp\n db.test.insert({ x: 100500 })' + local uri=myApp:myPass@some-name-rs0.demand-backup-incremental-6143 + local driver=mongodb+srv + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.N1px5NcyIh +++ mktemp ++ local LAST_ERR=/tmp/tmp.rzAinEEIBw ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.N1px5NcyIh ++ cat /tmp/tmp.rzAinEEIBw ++ rm /tmp/tmp.N1px5NcyIh /tmp/tmp.rzAinEEIBw ++ return 0 + local client_container=psmdb-client-8f86f7874-r95mz + local mongo_flag= + [[ myApp:myPass@some-name-rs0.demand-backup-incremental-6143 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-8f86f7874-r95mz -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.demand-backup-incremental-6143.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.Q1SuxZmoQN ++ mktemp + local LAST_ERR=/tmp/tmp.f48sEhboUq + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-8f86f7874-r95mz -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.demand-backup-incremental-6143.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Q1SuxZmoQN Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-rs0-1.some-name-rs0.demand-backup-incremental-6143.svc.cluster.local:27017,some-name-rs0-2.some-name-rs0.demand-backup-incremental-6143.svc.cluster.local:27017,some-name-rs0-0.some-name-rs0.demand-backup-incremental-6143.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("64b37d80-0e33-4c3a-b97e-44652dae4718") } Percona Server for MongoDB server version: v8.0.16-5 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.f48sEhboUq + rm /tmp/tmp.Q1SuxZmoQN /tmp/tmp.f48sEhboUq + return 0 + sleep 5 + compare_mongo_cmd find myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-incremental-6143 + local command=find + local uri=myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-incremental-6143 + local postfix= + local suffix= + local database=myApp + local collection=test + local sort= + local tls=false + local 'full_command=db.test.find()' + [[ -n '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-12-18T11:02:42+0000] running db.test.find() in myApp + [[ false == \t\r\u\e ]] + mongo_command=run_mongo + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-incremental-6143 mongodb '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-incremental-6143 + local driver=mongodb + local suffix=.svc.cluster.local + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for 
MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.O1dndcGoJ7 +++ mktemp egrep: warning: egrep is obsolescent; using grep -E ++ local LAST_ERR=/tmp/tmp.fDZmU0exRm ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.O1dndcGoJ7 ++ cat /tmp/tmp.fDZmU0exRm ++ rm /tmp/tmp.O1dndcGoJ7 /tmp/tmp.fDZmU0exRm ++ return 0 + local client_container=psmdb-client-8f86f7874-r95mz + local mongo_flag= + [[ myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-incremental-6143 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-8f86f7874-r95mz -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-incremental-6143.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.0sgp4NCZ8B ++ mktemp + local LAST_ERR=/tmp/tmp.8O9bCXWyXI + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-8f86f7874-r95mz -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-incremental-6143.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.0sgp4NCZ8B + cat /tmp/tmp.8O9bCXWyXI + rm /tmp/tmp.0sgp4NCZ8B /tmp/tmp.8O9bCXWyXI + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1917/e2e-tests/demand-backup-incremental/compare/find.json /tmp/tmp.LBiCyv6isV/find + compare_mongo_cmd find myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-incremental-6143 + local command=find + local uri=myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-incremental-6143 + local postfix= + local suffix= + local database=myApp + local collection=test + local sort= + local tls=false + local 'full_command=db.test.find()' + [[ -n '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-12-18T11:02:45+0000] running db.test.find() in myApp + [[ false == \t\r\u\e ]] + mongo_command=run_mongo + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-incremental-6143 mongodb '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-incremental-6143 + local driver=mongodb + local suffix=.svc.cluster.local + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp egrep: warning: egrep is obsolescent; using grep -E ++ local LAST_OUT=/tmp/tmp.9e7p3sVWdb +++ mktemp ++ local LAST_ERR=/tmp/tmp.1Zu4x8ZDzO ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods 
--selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.9e7p3sVWdb ++ cat /tmp/tmp.1Zu4x8ZDzO ++ rm /tmp/tmp.9e7p3sVWdb /tmp/tmp.1Zu4x8ZDzO ++ return 0 + local client_container=psmdb-client-8f86f7874-r95mz + local mongo_flag= + [[ myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-incremental-6143 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-8f86f7874-r95mz -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-incremental-6143.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.ExtxJElSxl ++ mktemp + local LAST_ERR=/tmp/tmp.z2sttczwb3 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-8f86f7874-r95mz -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-incremental-6143.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.ExtxJElSxl + cat /tmp/tmp.z2sttczwb3 + rm /tmp/tmp.ExtxJElSxl /tmp/tmp.z2sttczwb3 + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1917/e2e-tests/demand-backup-incremental/compare/find.json /tmp/tmp.LBiCyv6isV/find + compare_mongo_cmd find myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-incremental-6143 + local command=find + local uri=myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-incremental-6143 + local postfix= + local suffix= + local database=myApp + local collection=test + local sort= + local tls=false + local 'full_command=db.test.find()' + [[ -n '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-12-18T11:02:47+0000] running db.test.find() in myApp + [[ false == \t\r\u\e ]] + mongo_command=run_mongo + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-incremental-6143 mongodb '' + local 'command=use myApp\n db.test.find()' + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' + local uri=myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-incremental-6143 + local driver=mongodb + local suffix=.svc.cluster.local + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp egrep: warning: egrep is obsolescent; using grep -E ++ local LAST_OUT=/tmp/tmp.BeIf7KqEFY +++ mktemp ++ local LAST_ERR=/tmp/tmp.VRGVg7SpML ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.BeIf7KqEFY ++ cat /tmp/tmp.VRGVg7SpML ++ rm /tmp/tmp.BeIf7KqEFY /tmp/tmp.VRGVg7SpML ++ return 0 + local client_container=psmdb-client-8f86f7874-r95mz + local mongo_flag= + [[ myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-incremental-6143 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-8f86f7874-r95mz -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo 
mongodb://myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-incremental-6143.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.cA2n7Y2y0R ++ mktemp + local LAST_ERR=/tmp/tmp.6m4weDZGsR + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-8f86f7874-r95mz -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-incremental-6143.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.cA2n7Y2y0R + cat /tmp/tmp.6m4weDZGsR + rm /tmp/tmp.cA2n7Y2y0R /tmp/tmp.6m4weDZGsR + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1917/e2e-tests/demand-backup-incremental/compare/find.json /tmp/tmp.LBiCyv6isV/find + log 'running backups' + set +o xtrace [2025-12-18T11:02:50+0000] running backups + '[' -z '' ']' + backup_name_aws=backup-aws-s3 + backup_name_gcp=backup-gcp-cs + backup_name_azure=backup-azure-blob + run_backup aws-s3 backup-aws-s3 + local storage=aws-s3 + local backup_name=backup-aws-s3 + local base=true + local backup_type=incremental + [[ true == \t\r\u\e ]] + backup_type=incremental-base + yq /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1917/e2e-tests/demand-backup-incremental/conf/backup.yml + /usr/sbin/sed -e 's/storageName:/storageName: aws-s3/' + /usr/sbin/sed -e 's/name:/name: backup-aws-s3/' + yq '.spec.type="incremental-base"' + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.ZH5AvLZNbZ ++ mktemp + local LAST_ERR=/tmp/tmp.jAqyPjTMDQ + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.ZH5AvLZNbZ perconaservermongodbbackup.psmdb.percona.com/backup-aws-s3 created + cat /tmp/tmp.jAqyPjTMDQ + rm /tmp/tmp.ZH5AvLZNbZ /tmp/tmp.jAqyPjTMDQ + return 0 + run_backup gcp-cs backup-gcp-cs + local storage=gcp-cs + local backup_name=backup-gcp-cs + local base=true + local backup_type=incremental + [[ true == \t\r\u\e ]] + backup_type=incremental-base + yq /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1917/e2e-tests/demand-backup-incremental/conf/backup.yml + /usr/sbin/sed -e 's/name:/name: backup-gcp-cs/' + /usr/sbin/sed -e 's/storageName:/storageName: gcp-cs/' + yq '.spec.type="incremental-base"' + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.uElGeAshsK ++ mktemp + local LAST_ERR=/tmp/tmp.r7FntTdXdT + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.uElGeAshsK perconaservermongodbbackup.psmdb.percona.com/backup-gcp-cs created + cat /tmp/tmp.r7FntTdXdT + rm /tmp/tmp.uElGeAshsK /tmp/tmp.r7FntTdXdT + return 0 + run_backup azure-blob backup-azure-blob + local storage=azure-blob + local backup_name=backup-azure-blob + local base=true + local backup_type=incremental + [[ true == \t\r\u\e ]] + backup_type=incremental-base + yq /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1917/e2e-tests/demand-backup-incremental/conf/backup.yml + /usr/sbin/sed -e 's/name:/name: backup-azure-blob/' + /usr/sbin/sed -e 's/storageName:/storageName: azure-blob/' + yq '.spec.type="incremental-base"' + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.yV8cA5bzXi ++ mktemp + local LAST_ERR=/tmp/tmp.LmN4AaE5nJ + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in 
$(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.yV8cA5bzXi perconaservermongodbbackup.psmdb.percona.com/backup-azure-blob created + cat /tmp/tmp.LmN4AaE5nJ + rm /tmp/tmp.yV8cA5bzXi /tmp/tmp.LmN4AaE5nJ + return 0 + wait_backup backup-aws-s3 + local backup_name=backup-aws-s3 + local target_state=ready + set +o xtrace waiting for backup-aws-s3 to reach ready state...............OK + check_backup_in_storage backup-aws-s3 s3 rs0 + local backup=backup-aws-s3 + local storage_type=s3 + local replset=rs0 + local file=filelist.pbm + local endpoint ++ get_backup_dest backup-aws-s3 ++ local backup_name=backup-aws-s3 ++ /usr/sbin/sed 's|https://engk8soperators.blob.core.windows.net/||' ++ kubectl_bin get psmdb-backup backup-aws-s3 -o 'jsonpath={.status.destination}' ++ sed -e 's/.json$//' ++ sed 's|s3://||' ++ sed 's|azure://||' +++ mktemp ++ sed 's|gs://||' ++ local LAST_OUT=/tmp/tmp.2X8Ji138Rd +++ mktemp ++ local LAST_ERR=/tmp/tmp.qydjT2gCH2 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb-backup backup-aws-s3 -o 'jsonpath={.status.destination}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.2X8Ji138Rd ++ cat /tmp/tmp.qydjT2gCH2 ++ rm /tmp/tmp.2X8Ji138Rd /tmp/tmp.qydjT2gCH2 ++ return 0 + backup_dest=operator-testing/psmdb-demand-backup-incremental/2025-12-18T11:02:53Z + case ${storage_type} in + endpoint=s3.amazonaws.com + setup_aws_credentials + local secret_name=aws-s3-secret + [[ -n AKIARXP3OARBHR2RVML2 ]] + [[ -n YJw4YBqTnqLLKr8cCKKNGvtAjRYcz/iKqdWOBKy1 ]] + echo 'AWS credentials already set in environment' AWS credentials already set in environment + return 0 + check_backup_existence_aws operator-testing/psmdb-demand-backup-incremental/2025-12-18T11:02:53Z /rs0/filelist.pbm ++ echo operator-testing/psmdb-demand-backup-incremental/2025-12-18T11:02:53Z ++ cut -d/ -f1 + bucket=operator-testing ++ echo operator-testing/psmdb-demand-backup-incremental/2025-12-18T11:02:53Z ++ cut -d/ -f2- + key_prefix=psmdb-demand-backup-incremental/2025-12-18T11:02:53Z + key=/rs0/filelist.pbm + storage_name=aws-s3 + retry=0 + aws s3api head-object --bucket operator-testing --key psmdb-demand-backup-incremental/2025-12-18T11:02:53Z/rs0/filelist.pbm + echo 'Backup operator-testing/psmdb-demand-backup-incremental/2025-12-18T11:02:53Z//rs0/filelist.pbm found in aws-s3' Backup operator-testing/psmdb-demand-backup-incremental/2025-12-18T11:02:53Z//rs0/filelist.pbm found in aws-s3 + wait_backup backup-gcp-cs + local backup_name=backup-gcp-cs + local target_state=ready + set +o xtrace waiting for backup-gcp-cs to reach ready state...........OK + check_backup_in_storage backup-gcp-cs gcs rs0 + local backup=backup-gcp-cs + local storage_type=gcs + local replset=rs0 + local file=filelist.pbm + local endpoint ++ get_backup_dest backup-gcp-cs ++ local backup_name=backup-gcp-cs ++ /usr/sbin/sed 's|https://engk8soperators.blob.core.windows.net/||' ++ kubectl_bin get psmdb-backup backup-gcp-cs -o 'jsonpath={.status.destination}' ++ sed -e 's/.json$//' ++ sed 's|s3://||' +++ mktemp ++ sed 's|azure://||' ++ sed 's|gs://||' ++ local LAST_OUT=/tmp/tmp.NTTnzJhR7h +++ mktemp ++ local LAST_ERR=/tmp/tmp.MgdRQ2sxTv ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb-backup backup-gcp-cs -o 'jsonpath={.status.destination}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.NTTnzJhR7h ++ cat 
/tmp/tmp.MgdRQ2sxTv ++ rm /tmp/tmp.NTTnzJhR7h /tmp/tmp.MgdRQ2sxTv ++ return 0 + backup_dest=operator-testing/psmdb-demand-backup-incremental/2025-12-18T11:03:24Z + case ${storage_type} in + endpoint=storage.googleapis.com + setup_gcs_credentials + local secret_name=gcp-cs-secret + gsutil ls + echo 'Setting up GCS credentials from K8s secret: gcp-cs-secret' Setting up GCS credentials from K8s secret: gcp-cs-secret + local trace_was_on=0 + [[ ehxB == *x* ]] + trace_was_on=1 + set +x + echo 'GCS credentials configured successfully' GCS credentials configured successfully + check_backup_existence_gcs operator-testing/psmdb-demand-backup-incremental/2025-12-18T11:03:24Z /rs0/filelist.pbm + backup_dest_gcp=operator-testing/psmdb-demand-backup-incremental/2025-12-18T11:03:24Z + obj=/rs0/filelist.pbm + storage_name=gcp-cs + retry=0 + gcs_path=gs://operator-testing/psmdb-demand-backup-incremental/2025-12-18T11:03:24Z/rs0/filelist.pbm + gsutil ls gs://operator-testing/psmdb-demand-backup-incremental/2025-12-18T11:03:24Z/rs0/filelist.pbm + echo 'Backup gs://operator-testing/psmdb-demand-backup-incremental/2025-12-18T11:03:24Z/rs0/filelist.pbm found in gcp-cs' Backup gs://operator-testing/psmdb-demand-backup-incremental/2025-12-18T11:03:24Z/rs0/filelist.pbm found in gcp-cs + wait_backup backup-azure-blob + local backup_name=backup-azure-blob + local target_state=ready + set +o xtrace waiting for backup-azure-blob to reach ready state.........OK + check_backup_in_storage backup-azure-blob azure rs0 + local backup=backup-azure-blob + local storage_type=azure + local replset=rs0 + local file=filelist.pbm + local endpoint ++ get_backup_dest backup-azure-blob ++ local backup_name=backup-azure-blob ++ /usr/sbin/sed 's|https://engk8soperators.blob.core.windows.net/||' ++ kubectl_bin get psmdb-backup backup-azure-blob -o 'jsonpath={.status.destination}' ++ sed -e 's/.json$//' ++ sed 's|s3://||' ++ sed 's|azure://||' +++ mktemp ++ sed 's|gs://||' ++ local LAST_OUT=/tmp/tmp.8iPxTXc4a2 +++ mktemp ++ local LAST_ERR=/tmp/tmp.jYo2PLqdSe ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb-backup backup-azure-blob -o 'jsonpath={.status.destination}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.8iPxTXc4a2 ++ cat /tmp/tmp.jYo2PLqdSe ++ rm /tmp/tmp.8iPxTXc4a2 /tmp/tmp.jYo2PLqdSe ++ return 0 + backup_dest=operator-testing/psmdb-demand-backup-incremental/2025-12-18T11:03:47Z + case ${storage_type} in + endpoint=engk8soperators.blob.core.windows.net + setup_azure_credentials + local secret_name=azure-secret + echo 'Setting up Azure credentials from K8s secret: azure-secret' Setting up Azure credentials from K8s secret: azure-secret + local trace_was_on=0 + [[ ehxB == *x* ]] + trace_was_on=1 + set +x + echo 'Azure credentials configured successfully' Azure credentials configured successfully + check_backup_existence_azure operator-testing/psmdb-demand-backup-incremental/2025-12-18T11:03:47Z /rs0/filelist.pbm ++ echo operator-testing/psmdb-demand-backup-incremental/2025-12-18T11:03:47Z ++ sed 's|https://[^/]*\.blob\.core\.windows\.net/||' + url_path=operator-testing/psmdb-demand-backup-incremental/2025-12-18T11:03:47Z ++ echo operator-testing/psmdb-demand-backup-incremental/2025-12-18T11:03:47Z ++ cut -d/ -f1 + container=operator-testing ++ echo operator-testing/psmdb-demand-backup-incremental/2025-12-18T11:03:47Z ++ cut -d/ -f2- + blob_prefix=psmdb-demand-backup-incremental/2025-12-18T11:03:47Z + blob=/rs0/filelist.pbm + 
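
Note: each check_backup_in_storage call here reduces to a single object-existence probe for <backup destination>/rs0/filelist.pbm against the matching backend (S3 and GCS above, Azure and MinIO just below). A minimal sketch of that probe, assuming the cloud credentials are already exported the way the setup_aws/gcs/azure_credentials steps do; check_object is a hypothetical helper, not a function from the test suite:

    check_object() {
      local storage=$1 bucket=$2 key=$3
      case "$storage" in
        s3)    aws s3api head-object --bucket "$bucket" --key "$key" ;;
        gcs)   gsutil ls "gs://${bucket}/${key}" ;;
        azure) az storage blob show --container-name "$bucket" --name "$key" ;;
      esac
    }
    # e.g. the aws-s3 check above:
    check_object s3 operator-testing "psmdb-demand-backup-incremental/2025-12-18T11:02:53Z/rs0/filelist.pbm"

For MinIO the same listing runs from a throwaway in-cluster aws-cli pod pointed at http://minio-service:9000, retried until filelist.pbm shows up.
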
storage_name=azure-blob + retry=0 + blob_path=psmdb-demand-backup-incremental/2025-12-18T11:03:47Z/rs0/filelist.pbm + az storage blob show --container-name operator-testing --name psmdb-demand-backup-incremental/2025-12-18T11:03:47Z/rs0/filelist.pbm + echo 'Backup psmdb-demand-backup-incremental/2025-12-18T11:03:47Z/rs0/filelist.pbm found in azure-blob' Backup psmdb-demand-backup-incremental/2025-12-18T11:03:47Z/rs0/filelist.pbm found in azure-blob + backup_name_minio=backup-minio + run_backup minio backup-minio + local storage=minio + local backup_name=backup-minio + local base=true + local backup_type=incremental + [[ true == \t\r\u\e ]] + backup_type=incremental-base + yq /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1917/e2e-tests/demand-backup-incremental/conf/backup.yml + /usr/sbin/sed -e 's/name:/name: backup-minio/' + /usr/sbin/sed -e 's/storageName:/storageName: minio/' + yq '.spec.type="incremental-base"' + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.VtJ6PWAnAJ ++ mktemp + local LAST_ERR=/tmp/tmp.kDdGJbmHpd + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.VtJ6PWAnAJ perconaservermongodbbackup.psmdb.percona.com/backup-minio created + cat /tmp/tmp.kDdGJbmHpd + rm /tmp/tmp.VtJ6PWAnAJ /tmp/tmp.kDdGJbmHpd + return 0 + wait_backup backup-minio + local backup_name=backup-minio + local target_state=ready + set +o xtrace waiting for backup-minio to reach ready state.......OK + check_backup_in_storage backup-minio minio rs0 + local backup=backup-minio + local storage_type=minio + local replset=rs0 + local file=filelist.pbm + local endpoint ++ get_backup_dest backup-minio ++ local backup_name=backup-minio ++ /usr/sbin/sed 's|https://engk8soperators.blob.core.windows.net/||' ++ kubectl_bin get psmdb-backup backup-minio -o 'jsonpath={.status.destination}' ++ sed -e 's/.json$//' ++ sed 's|s3://||' ++ sed 's|azure://||' +++ mktemp ++ sed 's|gs://||' ++ local LAST_OUT=/tmp/tmp.YgDz1nVqs1 +++ mktemp ++ local LAST_ERR=/tmp/tmp.qoxxQFLzNd ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb-backup backup-minio -o 'jsonpath={.status.destination}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.YgDz1nVqs1 ++ cat /tmp/tmp.qoxxQFLzNd ++ rm /tmp/tmp.YgDz1nVqs1 /tmp/tmp.qoxxQFLzNd ++ return 0 + backup_dest=operator-testing/2025-12-18T11:04:14Z + case ${storage_type} in + endpoint=minio-service + kubectl_bin run -i --rm aws-cli --image=perconalab/awscli --restart=Never -- /usr/bin/env AWS_ACCESS_KEY_ID=some-access-key AWS_SECRET_ACCESS_KEY=some-secret-key AWS_DEFAULT_REGION=us-east-1 /usr/bin/aws --endpoint-url http://minio-service:9000 s3 ls s3://operator-testing/2025-12-18T11:04:14Z/rs0/filelist.pbm + grep filelist.pbm ++ mktemp + local LAST_OUT=/tmp/tmp.ZKOXK82mTg ++ mktemp + local LAST_ERR=/tmp/tmp.SXV3aLuVfp + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl run -i --rm aws-cli --image=perconalab/awscli --restart=Never -- /usr/bin/env AWS_ACCESS_KEY_ID=some-access-key AWS_SECRET_ACCESS_KEY=some-secret-key AWS_DEFAULT_REGION=us-east-1 /usr/bin/aws --endpoint-url http://minio-service:9000 s3 ls s3://operator-testing/2025-12-18T11:04:14Z/rs0/filelist.pbm + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.ZKOXK82mTg + cat /tmp/tmp.SXV3aLuVfp All commands and output from this session will be recorded in container 
logs, including credentials and sensitive information passed through the command prompt. If you don't see a command prompt, try pressing enter. + rm /tmp/tmp.ZKOXK82mTg /tmp/tmp.SXV3aLuVfp + return 0 + sleep 1 + let retry+=1 + '[' 7 -ge 60 ']' + kubectl_bin run -i --rm aws-cli --image=perconalab/awscli --restart=Never -- /usr/bin/env AWS_ACCESS_KEY_ID=some-access-key AWS_SECRET_ACCESS_KEY=some-secret-key AWS_DEFAULT_REGION=us-east-1 /usr/bin/aws --endpoint-url http://minio-service:9000 s3 ls s3://operator-testing/2025-12-18T11:04:14Z/rs0/filelist.pbm + grep filelist.pbm ++ mktemp + local LAST_OUT=/tmp/tmp.3adETbSMjk ++ mktemp + local LAST_ERR=/tmp/tmp.YpVqEX8PL6 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl run -i --rm aws-cli --image=perconalab/awscli --restart=Never -- /usr/bin/env AWS_ACCESS_KEY_ID=some-access-key AWS_SECRET_ACCESS_KEY=some-secret-key AWS_DEFAULT_REGION=us-east-1 /usr/bin/aws --endpoint-url http://minio-service:9000 s3 ls s3://operator-testing/2025-12-18T11:04:14Z/rs0/filelist.pbm + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.3adETbSMjk + cat /tmp/tmp.YpVqEX8PL6 All commands and output from this session will be recorded in container logs, including credentials and sensitive information passed through the command prompt. If you don't see a command prompt, try pressing enter. + rm /tmp/tmp.3adETbSMjk /tmp/tmp.YpVqEX8PL6 + return 0 + sleep 1 + let retry+=1 + '[' 8 -ge 60 ']' + kubectl_bin run -i --rm aws-cli --image=perconalab/awscli --restart=Never -- /usr/bin/env AWS_ACCESS_KEY_ID=some-access-key AWS_SECRET_ACCESS_KEY=some-secret-key AWS_DEFAULT_REGION=us-east-1 /usr/bin/aws --endpoint-url http://minio-service:9000 s3 ls s3://operator-testing/2025-12-18T11:04:14Z/rs0/filelist.pbm + grep filelist.pbm ++ mktemp + local LAST_OUT=/tmp/tmp.EijpNLSOGr ++ mktemp + local LAST_ERR=/tmp/tmp.u58avOvDSr + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl run -i --rm aws-cli --image=perconalab/awscli --restart=Never -- /usr/bin/env AWS_ACCESS_KEY_ID=some-access-key AWS_SECRET_ACCESS_KEY=some-secret-key AWS_DEFAULT_REGION=us-east-1 /usr/bin/aws --endpoint-url http://minio-service:9000 s3 ls s3://operator-testing/2025-12-18T11:04:14Z/rs0/filelist.pbm + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.EijpNLSOGr + cat /tmp/tmp.u58avOvDSr + rm /tmp/tmp.EijpNLSOGr /tmp/tmp.u58avOvDSr + return 0 2025-12-18 11:04:22 12286 filelist.pbm + run_mongo 'use myApp\n db.test.insert({ x: 100501 })' myApp:myPass@some-name-rs0.demand-backup-incremental-6143 + local 'command=use myApp\n db.test.insert({ x: 100501 })' + local uri=myApp:myPass@some-name-rs0.demand-backup-incremental-6143 + local driver=mongodb+srv + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.nBgeVRI3Rz +++ mktemp ++ local LAST_ERR=/tmp/tmp.ueFbG7aWnV ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.nBgeVRI3Rz ++ cat /tmp/tmp.ueFbG7aWnV ++ rm /tmp/tmp.nBgeVRI3Rz /tmp/tmp.ueFbG7aWnV ++ return 0 + local client_container=psmdb-client-8f86f7874-r95mz + local mongo_flag= + [[ myApp:myPass@some-name-rs0.demand-backup-incremental-6143 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec 
psmdb-client-8f86f7874-r95mz -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100501 })\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.demand-backup-incremental-6143.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.Pz9WU9MBjR ++ mktemp + local LAST_ERR=/tmp/tmp.zQ1NBnSawd + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-8f86f7874-r95mz -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100501 })\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.demand-backup-incremental-6143.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Pz9WU9MBjR Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-rs0-2.some-name-rs0.demand-backup-incremental-6143.svc.cluster.local:27017,some-name-rs0-0.some-name-rs0.demand-backup-incremental-6143.svc.cluster.local:27017,some-name-rs0-1.some-name-rs0.demand-backup-incremental-6143.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("8eb23db9-2b32-4fcb-9234-651971d4b1a4") } Percona Server for MongoDB server version: v8.0.16-5 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.zQ1NBnSawd + rm /tmp/tmp.Pz9WU9MBjR /tmp/tmp.zQ1NBnSawd + return 0 + sleep 5 + compare_mongo_cmd find myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-incremental-6143 -not-base + local command=find + local uri=myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-incremental-6143 + local postfix=-not-base + local suffix= + local database=myApp + local collection=test + local sort= + local tls=false + local 'full_command=db.test.find()' + [[ -n '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-12-18T11:04:59+0000] running db.test.find() in myApp + [[ false == \t\r\u\e ]] + mongo_command=run_mongo + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-incremental-6143 mongodb '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-incremental-6143 + local driver=mongodb + local suffix=.svc.cluster.local + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.UnqJwHpkVF +++ mktemp egrep: warning: egrep is obsolescent; using grep -E ++ local LAST_ERR=/tmp/tmp.kFDXeC91nj ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.UnqJwHpkVF ++ cat /tmp/tmp.kFDXeC91nj ++ rm /tmp/tmp.UnqJwHpkVF /tmp/tmp.kFDXeC91nj ++ return 0 + local client_container=psmdb-client-8f86f7874-r95mz + local mongo_flag= + [[ myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-incremental-6143 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-8f86f7874-r95mz -- bash -c 'printf '\''use myApp\n 
db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-incremental-6143.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.Uv4c9w0Z6O ++ mktemp + local LAST_ERR=/tmp/tmp.9RcxVE4ncJ + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-8f86f7874-r95mz -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-incremental-6143.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Uv4c9w0Z6O + cat /tmp/tmp.9RcxVE4ncJ + rm /tmp/tmp.Uv4c9w0Z6O /tmp/tmp.9RcxVE4ncJ + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1917/e2e-tests/demand-backup-incremental/compare/find-not-base.json /tmp/tmp.LBiCyv6isV/find-not-base + compare_mongo_cmd find myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-incremental-6143 -not-base + local command=find + local uri=myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-incremental-6143 + local postfix=-not-base + local suffix= + local database=myApp + local collection=test + local sort= + local tls=false + local 'full_command=db.test.find()' + [[ -n '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-12-18T11:05:01+0000] running db.test.find() in myApp + [[ false == \t\r\u\e ]] + mongo_command=run_mongo + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-incremental-6143 mongodb '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-incremental-6143 + local driver=mongodb + local suffix=.svc.cluster.local + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp egrep: warning: egrep is obsolescent; using grep -E ++ local LAST_OUT=/tmp/tmp.jhwSSu7uIV +++ mktemp ++ local LAST_ERR=/tmp/tmp.YrXmQIqmhx ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.jhwSSu7uIV ++ cat /tmp/tmp.YrXmQIqmhx ++ rm /tmp/tmp.jhwSSu7uIV /tmp/tmp.YrXmQIqmhx ++ return 0 + local client_container=psmdb-client-8f86f7874-r95mz + local mongo_flag= + [[ myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-incremental-6143 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-8f86f7874-r95mz -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-incremental-6143.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.WQMIPIO21f ++ mktemp + local LAST_ERR=/tmp/tmp.7qWCNqjDRq + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-8f86f7874-r95mz -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo 
mongodb://myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-incremental-6143.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.WQMIPIO21f + cat /tmp/tmp.7qWCNqjDRq + rm /tmp/tmp.WQMIPIO21f /tmp/tmp.7qWCNqjDRq + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1917/e2e-tests/demand-backup-incremental/compare/find-not-base.json /tmp/tmp.LBiCyv6isV/find-not-base + compare_mongo_cmd find myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-incremental-6143 -not-base + local command=find + local uri=myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-incremental-6143 + local postfix=-not-base + local suffix= + local database=myApp + local collection=test + local sort= + local tls=false + local 'full_command=db.test.find()' + [[ -n '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-12-18T11:05:04+0000] running db.test.find() in myApp + [[ false == \t\r\u\e ]] + mongo_command=run_mongo + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-incremental-6143 mongodb '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-incremental-6143 + local driver=mongodb + local suffix=.svc.cluster.local + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp egrep: warning: egrep is obsolescent; using grep -E ++ local LAST_OUT=/tmp/tmp.u9GuU0QoQm +++ mktemp ++ local LAST_ERR=/tmp/tmp.CoqxjaU4ot ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.u9GuU0QoQm ++ cat /tmp/tmp.CoqxjaU4ot ++ rm /tmp/tmp.u9GuU0QoQm /tmp/tmp.CoqxjaU4ot ++ return 0 + local client_container=psmdb-client-8f86f7874-r95mz + local mongo_flag= + [[ myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-incremental-6143 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-8f86f7874-r95mz -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-incremental-6143.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.aHLBbImX92 ++ mktemp + local LAST_ERR=/tmp/tmp.DHtWrmPQvN + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-8f86f7874-r95mz -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-incremental-6143.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.aHLBbImX92 + cat /tmp/tmp.DHtWrmPQvN + rm /tmp/tmp.aHLBbImX92 /tmp/tmp.DHtWrmPQvN + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1917/e2e-tests/demand-backup-incremental/compare/find-not-base.json /tmp/tmp.LBiCyv6isV/find-not-base + backup_name_minio_not_base=backup-minio-not-base + run_backup minio 
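
Note: the three find-not-base comparisons above follow one pattern: run db.test.find() through the psmdb-client pod, strip volatile noise (ObjectIds, pod ordinals, shell banners), and diff against a stored expectation. A rough, abridged sketch of that flow; the namespace and file paths are the ones from this run, and the noise filter is shortened from the egrep expression printed above:

    ns=demand-backup-incremental-6143
    client=$(kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}')
    uri="mongodb://myApp:myPass@some-name-rs0-0.some-name-rs0.${ns}.svc.cluster.local/admin?ssl=false&replicaSet=rs0"
    kubectl exec "$client" -- bash -c "printf 'use myApp\n db.test.find()\n' | mongo '$uri'" \
      | grep -Ev 'I NETWORK|W NETWORK|Implicit session|Percona Server for MongoDB|connecting to:|versions do not match' \
      | sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' \
      > /tmp/find-not-base
    diff -u e2e-tests/demand-backup-incremental/compare/find-not-base.json /tmp/find-not-base
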
backup-minio-not-base false + local storage=minio + local backup_name=backup-minio-not-base + local base=false + local backup_type=incremental + [[ false == \t\r\u\e ]] + yq /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1917/e2e-tests/demand-backup-incremental/conf/backup.yml + /usr/sbin/sed -e 's/name:/name: backup-minio-not-base/' + /usr/sbin/sed -e 's/storageName:/storageName: minio/' + yq '.spec.type="incremental"' + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.1jbX3QKSwi ++ mktemp + local LAST_ERR=/tmp/tmp.01Q6Bn94lr + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.1jbX3QKSwi perconaservermongodbbackup.psmdb.percona.com/backup-minio-not-base created + cat /tmp/tmp.01Q6Bn94lr + rm /tmp/tmp.1jbX3QKSwi /tmp/tmp.01Q6Bn94lr + return 0 + wait_backup backup-minio-not-base + local backup_name=backup-minio-not-base + local target_state=ready + set +o xtrace waiting for backup-minio-not-base to reach ready state.......OK + check_backup_in_storage backup-minio-not-base minio rs0 + local backup=backup-minio-not-base + local storage_type=minio + local replset=rs0 + local file=filelist.pbm + local endpoint ++ get_backup_dest backup-minio-not-base ++ local backup_name=backup-minio-not-base ++ /usr/sbin/sed 's|https://engk8soperators.blob.core.windows.net/||' ++ kubectl_bin get psmdb-backup backup-minio-not-base -o 'jsonpath={.status.destination}' ++ sed -e 's/.json$//' ++ sed 's|s3://||' +++ mktemp ++ sed 's|gs://||' ++ sed 's|azure://||' ++ local LAST_OUT=/tmp/tmp.HvDnL2o7vE +++ mktemp ++ local LAST_ERR=/tmp/tmp.O2ABmwyjPB ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb-backup backup-minio-not-base -o 'jsonpath={.status.destination}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.HvDnL2o7vE ++ cat /tmp/tmp.O2ABmwyjPB ++ rm /tmp/tmp.HvDnL2o7vE /tmp/tmp.O2ABmwyjPB ++ return 0 + backup_dest=operator-testing/2025-12-18T11:05:09Z + case ${storage_type} in + endpoint=minio-service + kubectl_bin run -i --rm aws-cli --image=perconalab/awscli --restart=Never -- /usr/bin/env AWS_ACCESS_KEY_ID=some-access-key AWS_SECRET_ACCESS_KEY=some-secret-key AWS_DEFAULT_REGION=us-east-1 /usr/bin/aws --endpoint-url http://minio-service:9000 s3 ls s3://operator-testing/2025-12-18T11:05:09Z/rs0/filelist.pbm + grep filelist.pbm ++ mktemp + local LAST_OUT=/tmp/tmp.cxzPl8v3wo ++ mktemp + local LAST_ERR=/tmp/tmp.Gz6LBHWYaE + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl run -i --rm aws-cli --image=perconalab/awscli --restart=Never -- /usr/bin/env AWS_ACCESS_KEY_ID=some-access-key AWS_SECRET_ACCESS_KEY=some-secret-key AWS_DEFAULT_REGION=us-east-1 /usr/bin/aws --endpoint-url http://minio-service:9000 s3 ls s3://operator-testing/2025-12-18T11:05:09Z/rs0/filelist.pbm + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.cxzPl8v3wo + cat /tmp/tmp.Gz6LBHWYaE + rm /tmp/tmp.cxzPl8v3wo /tmp/tmp.Gz6LBHWYaE + return 0 2025-12-18 11:05:15 13086 filelist.pbm + '[' -z '' ']' + run_restore backup-aws-s3 + local backup_name=backup-aws-s3 + log 'drop collection' + set +o xtrace [2025-12-18T11:05:26+0000] drop collection + run_mongo 'use myApp\n db.test.drop()' myApp:myPass@some-name-rs0.demand-backup-incremental-6143 + local 'command=use myApp\n db.test.drop()' + local uri=myApp:myPass@some-name-rs0.demand-backup-incremental-6143 + local 
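
Note: all of the backups above (backup-aws-s3, backup-gcp-cs, backup-azure-blob, backup-minio, backup-minio-not-base) are created by the same template pipeline; only the name, storageName and spec.type differ. A minimal sketch using the -not-base case just created (the template fields come from conf/backup.yml, which is not shown in this log):

    base=false                                   # true for the first backup on each storage
    backup_type=incremental
    [[ $base == "true" ]] && backup_type=incremental-base
    yq e2e-tests/demand-backup-incremental/conf/backup.yml \
      | sed -e 's/name:/name: backup-minio-not-base/' \
      | sed -e 's/storageName:/storageName: minio/' \
      | yq ".spec.type=\"${backup_type}\"" \
      | kubectl apply -f -

The first backup per storage is applied as incremental-base; backup-minio-not-base stays plain incremental, so PBM records it as an increment on top of the earlier backup-minio base.
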
driver=mongodb+srv + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.LGjiuPlIvR +++ mktemp ++ local LAST_ERR=/tmp/tmp.vy2ukXinkY ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.LGjiuPlIvR ++ cat /tmp/tmp.vy2ukXinkY ++ rm /tmp/tmp.LGjiuPlIvR /tmp/tmp.vy2ukXinkY ++ return 0 + local client_container=psmdb-client-8f86f7874-r95mz + local mongo_flag= + [[ myApp:myPass@some-name-rs0.demand-backup-incremental-6143 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-8f86f7874-r95mz -- bash -c 'printf '\''use myApp\n db.test.drop()\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.demand-backup-incremental-6143.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.DkE1zXaf4v ++ mktemp + local LAST_ERR=/tmp/tmp.gHKVphuHRL + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-8f86f7874-r95mz -- bash -c 'printf '\''use myApp\n db.test.drop()\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.demand-backup-incremental-6143.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.DkE1zXaf4v Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-rs0-1.some-name-rs0.demand-backup-incremental-6143.svc.cluster.local:27017,some-name-rs0-2.some-name-rs0.demand-backup-incremental-6143.svc.cluster.local:27017,some-name-rs0-0.some-name-rs0.demand-backup-incremental-6143.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("71b39317-8134-4fb5-8309-c58d36b28b06") } Percona Server for MongoDB server version: v8.0.16-5 WARNING: shell and server versions do not match switched to db myApp true bye + cat /tmp/tmp.gHKVphuHRL + rm /tmp/tmp.DkE1zXaf4v /tmp/tmp.gHKVphuHRL + return 0 + log 'check backup and restore -- backup-aws-s3' + set +o xtrace [2025-12-18T11:05:28+0000] check backup and restore -- backup-aws-s3 + yq /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1917/e2e-tests/demand-backup-incremental/conf/restore.yml + /usr/sbin/sed -e 's/name:/name: restore-backup-aws-s3/' + /usr/sbin/sed -e 's/backupName:/backupName: backup-aws-s3/' + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.Rlp3MMa4MS ++ mktemp + local LAST_ERR=/tmp/tmp.zpGmlIWMES + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Rlp3MMa4MS perconaservermongodbrestore.psmdb.percona.com/restore-backup-aws-s3 created + cat /tmp/tmp.zpGmlIWMES + rm /tmp/tmp.Rlp3MMa4MS /tmp/tmp.zpGmlIWMES + return 0 + run_recovery_check backup-aws-s3 + local backup_name=backup-aws-s3 + local compare_suffix=_restore + local base=true + wait_restore backup-aws-s3 some-name requested 0 3000 + local backup_name=backup-aws-s3 + local cluster_name=some-name + local target_state=requested + local wait_cluster_consistency=0 + local wait_time=3000 + local ok_if_ready=0 + set +o xtrace Waiting for the psmdb-restore/restore-backup-aws-s3 object to be created.OK Waiting psmdb-restore/restore-backup-aws-s3 to reach state "requested" ..OK after 1 
minutes + [[ 0 -eq 1 ]] + echo + compare_kubectl statefulset/some-name-rs0 _restore + local resource=statefulset/some-name-rs0 + local postfix=_restore + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1917/e2e-tests/demand-backup-incremental/compare/statefulset_some-name-rs0_restore.yml + local new_result=/tmp/tmp.LBiCyv6isV/statefulset_some-name-rs0.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1917/e2e-tests/demand-backup-incremental/compare/statefulset_some-name-rs0_restore-oc.yml ']' + kubectl_bin get -o yaml statefulset/some-name-rs0 + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("demand-backup-incremental-6143", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. 
== "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.qqru5tej1q ++ mktemp + local LAST_ERR=/tmp/tmp.9epJW0sjcz + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get -o yaml statefulset/some-name-rs0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.qqru5tej1q + cat /tmp/tmp.9epJW0sjcz + rm /tmp/tmp.qqru5tej1q /tmp/tmp.9epJW0sjcz + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.LBiCyv6isV/statefulset_some-name-rs0.yml + version_gt 1.22 ++ echo '1.31 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.LBiCyv6isV/statefulset_some-name-rs0.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.LBiCyv6isV/statefulset_some-name-rs0.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1917/e2e-tests/demand-backup-incremental/compare/statefulset_some-name-rs0_restore.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1917/e2e-tests/demand-backup-incremental/compare/statefulset_some-name-rs0_restore.yml /tmp/tmp.LBiCyv6isV/statefulset_some-name-rs0.yml + log 'compare_kubectl: statefulset/some-name-rs0 OK' + set +o xtrace [2025-12-18T11:07:21+0000] compare_kubectl: statefulset/some-name-rs0 OK + wait_restore backup-aws-s3 some-name ready 0 1800 + local backup_name=backup-aws-s3 + local cluster_name=some-name + local target_state=ready + local wait_cluster_consistency=0 + local wait_time=1800 + local ok_if_ready=0 + set +o xtrace Waiting for the psmdb-restore/restore-backup-aws-s3 object to be created.OK Waiting psmdb-restore/restore-backup-aws-s3 to reach state "ready" ..OK after 1 minutes + [[ 0 -eq 1 ]] ++ kubectl_bin get psmdb some-name -o yaml ++ yq '.metadata.annotations."percona.com/resync-pbm"' +++ mktemp ++ local LAST_OUT=/tmp/tmp.DRIJar1ygc +++ mktemp ++ local LAST_ERR=/tmp/tmp.7sw0h9oVPv ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o yaml ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.DRIJar1ygc ++ cat /tmp/tmp.7sw0h9oVPv ++ rm /tmp/tmp.DRIJar1ygc /tmp/tmp.7sw0h9oVPv ++ return 0 + '[' true == null ']' + echo + wait_cluster_consistency some-name + local cluster_name=some-name + local wait_time=32 + retry=0 + sleep 7 + echo -n 'waiting for cluster readyness' waiting for cluster readyness++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.GhKy4To9lQ +++ mktemp ++ local LAST_ERR=/tmp/tmp.CAb6zZ2LQl ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.GhKy4To9lQ ++ cat /tmp/tmp.CAb6zZ2LQl ++ rm /tmp/tmp.GhKy4To9lQ /tmp/tmp.CAb6zZ2LQl ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 1 -ge 32 ']' + echo -n . 
.+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.jMwPl0PtTz +++ mktemp ++ local LAST_ERR=/tmp/tmp.pwff2rUB7w ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.jMwPl0PtTz ++ cat /tmp/tmp.pwff2rUB7w ++ rm /tmp/tmp.jMwPl0PtTz /tmp/tmp.pwff2rUB7w ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 2 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.1HbzeEmD8D +++ mktemp ++ local LAST_ERR=/tmp/tmp.tSD6LNlfaa ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.1HbzeEmD8D ++ cat /tmp/tmp.tSD6LNlfaa ++ rm /tmp/tmp.1HbzeEmD8D /tmp/tmp.tSD6LNlfaa ++ return 0 + [[ error == \r\e\a\d\y ]] + let retry+=1 + '[' 3 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.6LJTQUigaQ +++ mktemp ++ local LAST_ERR=/tmp/tmp.VY8Y8t4ZVF ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.6LJTQUigaQ ++ cat /tmp/tmp.VY8Y8t4ZVF ++ rm /tmp/tmp.6LJTQUigaQ /tmp/tmp.VY8Y8t4ZVF ++ return 0 + [[ error == \r\e\a\d\y ]] + let retry+=1 + '[' 4 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.mqJQ8KrQ8N +++ mktemp ++ local LAST_ERR=/tmp/tmp.4PiYVvStkn ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.mqJQ8KrQ8N ++ cat /tmp/tmp.4PiYVvStkn ++ rm /tmp/tmp.mqJQ8KrQ8N /tmp/tmp.4PiYVvStkn ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 5 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.CqTYhbuAxn +++ mktemp ++ local LAST_ERR=/tmp/tmp.zK0qdCm8op ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.CqTYhbuAxn ++ cat /tmp/tmp.zK0qdCm8op ++ rm /tmp/tmp.CqTYhbuAxn /tmp/tmp.zK0qdCm8op ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 6 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.JbHihnONQf +++ mktemp ++ local LAST_ERR=/tmp/tmp.lBpPrTmJll ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.JbHihnONQf ++ cat /tmp/tmp.lBpPrTmJll ++ rm /tmp/tmp.JbHihnONQf /tmp/tmp.lBpPrTmJll ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 7 -ge 32 ']' + echo -n . 
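
Note: the dots being printed here come from wait_cluster_consistency, which simply polls the custom resource's .status.state. During a physical restore the state can bounce through "initializing" and even "error" (as it does above) before settling on "ready", so the loop only gives up after its retry budget. A minimal sketch with the limits used in this run (10 s between polls, 32 attempts); this is a simplification, not the suite's exact function:

    retry=0
    until [[ "$(kubectl get psmdb some-name -o 'jsonpath={.status.state}')" == "ready" ]]; do
      (( ++retry > 32 )) && { echo "cluster did not become ready" >&2; break; }
      echo -n .
      sleep 10
    done
    echo .OK
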
.+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.45szOpljUw +++ mktemp ++ local LAST_ERR=/tmp/tmp.ZrNNdh48RN ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.45szOpljUw ++ cat /tmp/tmp.ZrNNdh48RN ++ rm /tmp/tmp.45szOpljUw /tmp/tmp.ZrNNdh48RN ++ return 0 + [[ ready == \r\e\a\d\y ]] + echo .OK .OK + wait_for_pbm_operations some-name + local cluster=some-name + set +o xtrace waiting for PBM operation to finish....OK + [[ true == true ]] + compare_mongo_cmd find myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-incremental-6143 + local command=find + local uri=myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-incremental-6143 + local postfix= + local suffix= + local database=myApp + local collection=test + local sort= + local tls=false + local 'full_command=db.test.find()' + [[ -n '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-12-18T11:10:54+0000] running db.test.find() in myApp + [[ false == \t\r\u\e ]] + mongo_command=run_mongo + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-incremental-6143 mongodb '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-incremental-6143 + local driver=mongodb + local suffix=.svc.cluster.local + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp egrep: warning: egrep is obsolescent; using grep -E ++ local LAST_OUT=/tmp/tmp.ja5EPyOL2R +++ mktemp ++ local LAST_ERR=/tmp/tmp.w0JKWKbeZh ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.ja5EPyOL2R ++ cat /tmp/tmp.w0JKWKbeZh ++ rm /tmp/tmp.ja5EPyOL2R /tmp/tmp.w0JKWKbeZh ++ return 0 + local client_container=psmdb-client-8f86f7874-r95mz + local mongo_flag= + [[ myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-incremental-6143 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-8f86f7874-r95mz -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-incremental-6143.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.moUEs59IYX ++ mktemp + local LAST_ERR=/tmp/tmp.dVroMIr593 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-8f86f7874-r95mz -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-incremental-6143.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.moUEs59IYX + cat /tmp/tmp.dVroMIr593 + rm /tmp/tmp.moUEs59IYX /tmp/tmp.dVroMIr593 + return 0 + [[ 0 -eq 0 ]] + diff -u 
/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1917/e2e-tests/demand-backup-incremental/compare/find.json /tmp/tmp.LBiCyv6isV/find + compare_mongo_cmd find myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-incremental-6143 + local command=find + local uri=myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-incremental-6143 + local postfix= + local suffix= + local database=myApp + local collection=test + local sort= + local tls=false + local 'full_command=db.test.find()' + [[ -n '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-12-18T11:10:57+0000] running db.test.find() in myApp + [[ false == \t\r\u\e ]] + mongo_command=run_mongo + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-incremental-6143 mongodb '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-incremental-6143 + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' + local driver=mongodb + local suffix=.svc.cluster.local + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp egrep: warning: egrep is obsolescent; using grep -E ++ local LAST_OUT=/tmp/tmp.DbTQ4G7beA +++ mktemp ++ local LAST_ERR=/tmp/tmp.vWAggUJWSh ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.DbTQ4G7beA ++ cat /tmp/tmp.vWAggUJWSh ++ rm /tmp/tmp.DbTQ4G7beA /tmp/tmp.vWAggUJWSh ++ return 0 + local client_container=psmdb-client-8f86f7874-r95mz + local mongo_flag= + [[ myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-incremental-6143 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-8f86f7874-r95mz -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-incremental-6143.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.HKOO4LPAvS ++ mktemp + local LAST_ERR=/tmp/tmp.9vNbq2ceJb + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-8f86f7874-r95mz -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-incremental-6143.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.HKOO4LPAvS + cat /tmp/tmp.9vNbq2ceJb + rm /tmp/tmp.HKOO4LPAvS /tmp/tmp.9vNbq2ceJb + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1917/e2e-tests/demand-backup-incremental/compare/find.json /tmp/tmp.LBiCyv6isV/find + compare_mongo_cmd find myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-incremental-6143 + local command=find + local uri=myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-incremental-6143 + local postfix= + local suffix= + local database=myApp + local collection=test + local sort= + local tls=false + local 'full_command=db.test.find()' + [[ -n '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-12-18T11:10:59+0000] running db.test.find() 
in myApp + [[ false == \t\r\u\e ]] + mongo_command=run_mongo + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-incremental-6143 mongodb '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-incremental-6143 + local driver=mongodb + local suffix=.svc.cluster.local + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' egrep: warning: egrep is obsolescent; using grep -E +++ mktemp ++ local LAST_OUT=/tmp/tmp.9Yr4CuX4ds +++ mktemp ++ local LAST_ERR=/tmp/tmp.VaYYAQIbPc ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.9Yr4CuX4ds ++ cat /tmp/tmp.VaYYAQIbPc ++ rm /tmp/tmp.9Yr4CuX4ds /tmp/tmp.VaYYAQIbPc ++ return 0 + local client_container=psmdb-client-8f86f7874-r95mz + local mongo_flag= + [[ myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-incremental-6143 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-8f86f7874-r95mz -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-incremental-6143.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.l1LYdcJ9xe ++ mktemp + local LAST_ERR=/tmp/tmp.4bAQMvBpDn + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-8f86f7874-r95mz -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-incremental-6143.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.l1LYdcJ9xe + cat /tmp/tmp.4bAQMvBpDn + rm /tmp/tmp.l1LYdcJ9xe /tmp/tmp.4bAQMvBpDn + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1917/e2e-tests/demand-backup-incremental/compare/find.json /tmp/tmp.LBiCyv6isV/find + run_restore backup-gcp-cs + local backup_name=backup-gcp-cs + log 'drop collection' + set +o xtrace [2025-12-18T11:11:01+0000] drop collection + run_mongo 'use myApp\n db.test.drop()' myApp:myPass@some-name-rs0.demand-backup-incremental-6143 + local 'command=use myApp\n db.test.drop()' + local uri=myApp:myPass@some-name-rs0.demand-backup-incremental-6143 + local driver=mongodb+srv + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.e2GVggMeLu +++ mktemp ++ local LAST_ERR=/tmp/tmp.B2u2F4SRjV ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.e2GVggMeLu ++ cat /tmp/tmp.B2u2F4SRjV ++ rm /tmp/tmp.e2GVggMeLu /tmp/tmp.B2u2F4SRjV ++ return 0 + local client_container=psmdb-client-8f86f7874-r95mz + local mongo_flag= + [[ 
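
Note: each run_restore first drops myApp.test (as it is doing here for backup-gcp-cs), so the later data checks can only pass if the restore actually brings the collection back, and then applies a PerconaServerMongoDBRestore built from a template. A minimal sketch of the apply step that follows (the template fields come from conf/restore.yml, not shown in this log):

    yq e2e-tests/demand-backup-incremental/conf/restore.yml \
      | sed -e 's/name:/name: restore-backup-gcp-cs/' \
      | sed -e 's/backupName:/backupName: backup-gcp-cs/' \
      | kubectl apply -f -

The test then waits for the psmdb-restore object to reach "requested" and, after the statefulset comparison, "ready".
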
myApp:myPass@some-name-rs0.demand-backup-incremental-6143 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-8f86f7874-r95mz -- bash -c 'printf '\''use myApp\n db.test.drop()\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.demand-backup-incremental-6143.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.3mT04Pzv0Z ++ mktemp + local LAST_ERR=/tmp/tmp.0Txkn5hoXf + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-8f86f7874-r95mz -- bash -c 'printf '\''use myApp\n db.test.drop()\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.demand-backup-incremental-6143.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.3mT04Pzv0Z Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-rs0-1.some-name-rs0.demand-backup-incremental-6143.svc.cluster.local:27017,some-name-rs0-2.some-name-rs0.demand-backup-incremental-6143.svc.cluster.local:27017,some-name-rs0-0.some-name-rs0.demand-backup-incremental-6143.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("70d9eaf6-8ea1-4840-baae-4997eb69623a") } Percona Server for MongoDB server version: v8.0.16-5 WARNING: shell and server versions do not match switched to db myApp true bye + cat /tmp/tmp.0Txkn5hoXf + rm /tmp/tmp.3mT04Pzv0Z /tmp/tmp.0Txkn5hoXf + return 0 + log 'check backup and restore -- backup-gcp-cs' + set +o xtrace [2025-12-18T11:11:04+0000] check backup and restore -- backup-gcp-cs + /usr/sbin/sed -e 's/name:/name: restore-backup-gcp-cs/' + yq /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1917/e2e-tests/demand-backup-incremental/conf/restore.yml + kubectl_bin apply -f - + /usr/sbin/sed -e 's/backupName:/backupName: backup-gcp-cs/' ++ mktemp + local LAST_OUT=/tmp/tmp.bbxg7YJhbT ++ mktemp + local LAST_ERR=/tmp/tmp.qBrnEvuPf2 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.bbxg7YJhbT perconaservermongodbrestore.psmdb.percona.com/restore-backup-gcp-cs created + cat /tmp/tmp.qBrnEvuPf2 + rm /tmp/tmp.bbxg7YJhbT /tmp/tmp.qBrnEvuPf2 + return 0 + run_recovery_check backup-gcp-cs + local backup_name=backup-gcp-cs + local compare_suffix=_restore + local base=true + wait_restore backup-gcp-cs some-name requested 0 3000 + local backup_name=backup-gcp-cs + local cluster_name=some-name + local target_state=requested + local wait_cluster_consistency=0 + local wait_time=3000 + local ok_if_ready=0 + set +o xtrace Waiting for the psmdb-restore/restore-backup-gcp-cs object to be created.OK Waiting psmdb-restore/restore-backup-gcp-cs to reach state "requested" ..OK after 1 minutes + [[ 0 -eq 1 ]] + echo + compare_kubectl statefulset/some-name-rs0 _restore + local resource=statefulset/some-name-rs0 + local postfix=_restore + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1917/e2e-tests/demand-backup-incremental/compare/statefulset_some-name-rs0_restore.yml + local new_result=/tmp/tmp.LBiCyv6isV/statefulset_some-name-rs0.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1917/e2e-tests/demand-backup-incremental/compare/statefulset_some-name-rs0_restore-oc.yml ']' + kubectl_bin get -o yaml statefulset/some-name-rs0 + yq eval ' 
del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("demand-backup-incremental-6143", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. 
== "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.iwugqY55W2 ++ mktemp + local LAST_ERR=/tmp/tmp.Dv7lt4PScW + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get -o yaml statefulset/some-name-rs0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.iwugqY55W2 + cat /tmp/tmp.Dv7lt4PScW + rm /tmp/tmp.iwugqY55W2 /tmp/tmp.Dv7lt4PScW + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.LBiCyv6isV/statefulset_some-name-rs0.yml + version_gt 1.22 ++ echo '1.31 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.LBiCyv6isV/statefulset_some-name-rs0.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.LBiCyv6isV/statefulset_some-name-rs0.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1917/e2e-tests/demand-backup-incremental/compare/statefulset_some-name-rs0_restore.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1917/e2e-tests/demand-backup-incremental/compare/statefulset_some-name-rs0_restore.yml /tmp/tmp.LBiCyv6isV/statefulset_some-name-rs0.yml + log 'compare_kubectl: statefulset/some-name-rs0 OK' + set +o xtrace [2025-12-18T11:12:40+0000] compare_kubectl: statefulset/some-name-rs0 OK + wait_restore backup-gcp-cs some-name ready 0 1800 + local backup_name=backup-gcp-cs + local cluster_name=some-name + local target_state=ready + local wait_cluster_consistency=0 + local wait_time=1800 + local ok_if_ready=0 + set +o xtrace Waiting for the psmdb-restore/restore-backup-gcp-cs object to be created.OK Waiting psmdb-restore/restore-backup-gcp-cs to reach state "ready" .....2025-12-18T11:15:35.429Z DEBUG PBM restore status {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-gcp-cs","namespace":"demand-backup-incremental-6143"}, "namespace": "demand-backup-incremental-6143", "name": "restore-backup-gcp-cs", "reconcileID": "668faa16-5e2f-44c2-968d-58f4da65dc86", "status": {"type":"physical","opid":"","name":"2025-12-18T11:12:38.887390155Z","replsets":[{"name":"rs0","start_ts":0,"status":"","size":0,"size_uncompressed":0,"last_transition_ts":0,"first_write_ts":{"T":0,"I":0},"last_write_ts":{"T":0,"I":0},"node":"","conditions":null}],"compression":"","store":{"type":""},"size":0,"size_uncompressed":0,"mongodb_version":"","fcv":"","start_ts":0,"last_transition_ts":0,"first_write_ts":{"T":0,"I":0},"last_write_ts":{"T":0,"I":0},"hb":{"T":0,"I":0},"status":"","conditions":null,"n":null,"pbm_version":"","balancer":""}} 2025-12-18T11:15:40.430Z DEBUG checking if restore is allowed {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-gcp-cs","namespace":"demand-backup-incremental-6143"}, "namespace": "demand-backup-incremental-6143", "name": "restore-backup-gcp-cs", "reconcileID": "2167cf58-98e2-462e-a194-b0de32f95a07", "cluster": "some-name", "namespace": "demand-backup-incremental-6143"} 2025-12-18T11:15:40.430Z DEBUG Pod is ready for physical restore {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-gcp-cs","namespace":"demand-backup-incremental-6143"}, "namespace": 
"demand-backup-incremental-6143", "name": "restore-backup-gcp-cs", "reconcileID": "2167cf58-98e2-462e-a194-b0de32f95a07", "pod": "some-name-rs0-0"} 2025-12-18T11:15:40.430Z DEBUG Pod is ready for physical restore {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-gcp-cs","namespace":"demand-backup-incremental-6143"}, "namespace": "demand-backup-incremental-6143", "name": "restore-backup-gcp-cs", "reconcileID": "2167cf58-98e2-462e-a194-b0de32f95a07", "pod": "some-name-rs0-1"} 2025-12-18T11:15:40.430Z DEBUG Pod is ready for physical restore {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-gcp-cs","namespace":"demand-backup-incremental-6143"}, "namespace": "demand-backup-incremental-6143", "name": "restore-backup-gcp-cs", "reconcileID": "2167cf58-98e2-462e-a194-b0de32f95a07", "pod": "some-name-rs0-2"} 2025-12-18T11:15:40.430Z DEBUG Statefulset is ready for physical restore {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-gcp-cs","namespace":"demand-backup-incremental-6143"}, "namespace": "demand-backup-incremental-6143", "name": "restore-backup-gcp-cs", "reconcileID": "2167cf58-98e2-462e-a194-b0de32f95a07", "sts": "some-name-rs0", "replset": "rs0"} 2025-12-18T11:15:40.430Z DEBUG Check restore status {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-gcp-cs","namespace":"demand-backup-incremental-6143"}, "namespace": "demand-backup-incremental-6143", "name": "restore-backup-gcp-cs", "reconcileID": "2167cf58-98e2-462e-a194-b0de32f95a07", "command": ["/opt/percona/pbm", "describe-restore", "2025-12-18T11:12:38.887390155Z", "--config", "/etc/pbm/pbm_config.yaml", "--out", "json"], "pod": "some-name-rs0-0"} 2025-12-18T11:15:40.582Z DEBUG PBM restore status {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-gcp-cs","namespace":"demand-backup-incremental-6143"}, "namespace": "demand-backup-incremental-6143", "name": "restore-backup-gcp-cs", "reconcileID": "2167cf58-98e2-462e-a194-b0de32f95a07", "status": {"type":"physical","opid":"","name":"2025-12-18T11:12:38.887390155Z","replsets":[{"name":"rs0","start_ts":0,"status":"","size":0,"size_uncompressed":0,"last_transition_ts":0,"first_write_ts":{"T":0,"I":0},"last_write_ts":{"T":0,"I":0},"node":"","conditions":null}],"compression":"","store":{"type":""},"size":0,"size_uncompressed":0,"mongodb_version":"","fcv":"","start_ts":0,"last_transition_ts":0,"first_write_ts":{"T":0,"I":0},"last_write_ts":{"T":0,"I":0},"hb":{"T":0,"I":0},"status":"","conditions":null,"n":null,"pbm_version":"","balancer":""}} 2025-12-18T11:15:45.583Z DEBUG checking if restore is allowed {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-gcp-cs","namespace":"demand-backup-incremental-6143"}, "namespace": "demand-backup-incremental-6143", "name": "restore-backup-gcp-cs", 
"reconcileID": "228de8ba-68cb-4c5f-bbff-6550bbbb10f9", "cluster": "some-name", "namespace": "demand-backup-incremental-6143"} 2025-12-18T11:15:45.583Z DEBUG Pod is ready for physical restore {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-gcp-cs","namespace":"demand-backup-incremental-6143"}, "namespace": "demand-backup-incremental-6143", "name": "restore-backup-gcp-cs", "reconcileID": "228de8ba-68cb-4c5f-bbff-6550bbbb10f9", "pod": "some-name-rs0-1"} 2025-12-18T11:15:45.583Z DEBUG Pod is ready for physical restore {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-gcp-cs","namespace":"demand-backup-incremental-6143"}, "namespace": "demand-backup-incremental-6143", "name": "restore-backup-gcp-cs", "reconcileID": "228de8ba-68cb-4c5f-bbff-6550bbbb10f9", "pod": "some-name-rs0-2"} 2025-12-18T11:15:45.583Z DEBUG Pod is ready for physical restore {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-gcp-cs","namespace":"demand-backup-incremental-6143"}, "namespace": "demand-backup-incremental-6143", "name": "restore-backup-gcp-cs", "reconcileID": "228de8ba-68cb-4c5f-bbff-6550bbbb10f9", "pod": "some-name-rs0-0"} 2025-12-18T11:15:45.583Z DEBUG Statefulset is ready for physical restore {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-gcp-cs","namespace":"demand-backup-incremental-6143"}, "namespace": "demand-backup-incremental-6143", "name": "restore-backup-gcp-cs", "reconcileID": "228de8ba-68cb-4c5f-bbff-6550bbbb10f9", "sts": "some-name-rs0", "replset": "rs0"} 2025-12-18T11:15:45.583Z DEBUG Check restore status {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-gcp-cs","namespace":"demand-backup-incremental-6143"}, "namespace": "demand-backup-incremental-6143", "name": "restore-backup-gcp-cs", "reconcileID": "228de8ba-68cb-4c5f-bbff-6550bbbb10f9", "command": ["/opt/percona/pbm", "describe-restore", "2025-12-18T11:12:38.887390155Z", "--config", "/etc/pbm/pbm_config.yaml", "--out", "json"], "pod": "some-name-rs0-0"} 2025-12-18T11:15:45.730Z DEBUG PBM restore status {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-gcp-cs","namespace":"demand-backup-incremental-6143"}, "namespace": "demand-backup-incremental-6143", "name": "restore-backup-gcp-cs", "reconcileID": "228de8ba-68cb-4c5f-bbff-6550bbbb10f9", "status": 
{"type":"physical","opid":"","name":"2025-12-18T11:12:38.887390155Z","replsets":[{"name":"rs0","start_ts":0,"status":"","size":0,"size_uncompressed":0,"last_transition_ts":0,"first_write_ts":{"T":0,"I":0},"last_write_ts":{"T":0,"I":0},"node":"","conditions":null}],"compression":"","store":{"type":""},"size":0,"size_uncompressed":0,"mongodb_version":"","fcv":"","start_ts":0,"last_transition_ts":0,"first_write_ts":{"T":0,"I":0},"last_write_ts":{"T":0,"I":0},"hb":{"T":0,"I":0},"status":"","conditions":null,"n":null,"pbm_version":"","balancer":""}} 2025-12-18T11:15:50.731Z DEBUG checking if restore is allowed {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-gcp-cs","namespace":"demand-backup-incremental-6143"}, "namespace": "demand-backup-incremental-6143", "name": "restore-backup-gcp-cs", "reconcileID": "bf297301-303a-41c9-9c70-1d97d1ee0b28", "cluster": "some-name", "namespace": "demand-backup-incremental-6143"} 2025-12-18T11:15:50.731Z DEBUG Pod is ready for physical restore {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-gcp-cs","namespace":"demand-backup-incremental-6143"}, "namespace": "demand-backup-incremental-6143", "name": "restore-backup-gcp-cs", "reconcileID": "bf297301-303a-41c9-9c70-1d97d1ee0b28", "pod": "some-name-rs0-0"} 2025-12-18T11:15:50.731Z DEBUG Pod is ready for physical restore {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-gcp-cs","namespace":"demand-backup-incremental-6143"}, "namespace": "demand-backup-incremental-6143", "name": "restore-backup-gcp-cs", "reconcileID": "bf297301-303a-41c9-9c70-1d97d1ee0b28", "pod": "some-name-rs0-1"} 2025-12-18T11:15:50.731Z DEBUG Pod is ready for physical restore {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-gcp-cs","namespace":"demand-backup-incremental-6143"}, "namespace": "demand-backup-incremental-6143", "name": "restore-backup-gcp-cs", "reconcileID": "bf297301-303a-41c9-9c70-1d97d1ee0b28", "pod": "some-name-rs0-2"} 2025-12-18T11:15:50.731Z DEBUG Statefulset is ready for physical restore {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-gcp-cs","namespace":"demand-backup-incremental-6143"}, "namespace": "demand-backup-incremental-6143", "name": "restore-backup-gcp-cs", "reconcileID": "bf297301-303a-41c9-9c70-1d97d1ee0b28", "sts": "some-name-rs0", "replset": "rs0"} 2025-12-18T11:15:50.731Z DEBUG Check restore status {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-gcp-cs","namespace":"demand-backup-incremental-6143"}, "namespace": "demand-backup-incremental-6143", "name": "restore-backup-gcp-cs", "reconcileID": "bf297301-303a-41c9-9c70-1d97d1ee0b28", "command": ["/opt/percona/pbm", "describe-restore", "2025-12-18T11:12:38.887390155Z", "--config", "/etc/pbm/pbm_config.yaml", "--out", "json"], "pod": "some-name-rs0-0"} 
2025-12-18T11:15:51.364Z DEBUG PBM restore status {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-gcp-cs","namespace":"demand-backup-incremental-6143"}, "namespace": "demand-backup-incremental-6143", "name": "restore-backup-gcp-cs", "reconcileID": "bf297301-303a-41c9-9c70-1d97d1ee0b28", "status": {"type":"physical","opid":"","name":"2025-12-18T11:12:38.887390155Z","replsets":[{"name":"rs0","start_ts":0,"status":"","size":0,"size_uncompressed":0,"last_transition_ts":0,"first_write_ts":{"T":0,"I":0},"last_write_ts":{"T":0,"I":0},"node":"","conditions":null}],"compression":"","store":{"type":""},"size":0,"size_uncompressed":0,"mongodb_version":"","fcv":"","start_ts":0,"last_transition_ts":0,"first_write_ts":{"T":0,"I":0},"last_write_ts":{"T":0,"I":0},"hb":{"T":0,"I":0},"status":"","conditions":null,"n":null,"pbm_version":"","balancer":""}} 2025-12-18T11:15:56.365Z DEBUG checking if restore is allowed {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-gcp-cs","namespace":"demand-backup-incremental-6143"}, "namespace": "demand-backup-incremental-6143", "name": "restore-backup-gcp-cs", "reconcileID": "dc2d3166-fb62-405b-8d69-e8eb4618df26", "cluster": "some-name", "namespace": "demand-backup-incremental-6143"} 2025-12-18T11:15:56.366Z DEBUG Pod is ready for physical restore {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-gcp-cs","namespace":"demand-backup-incremental-6143"}, "namespace": "demand-backup-incremental-6143", "name": "restore-backup-gcp-cs", "reconcileID": "dc2d3166-fb62-405b-8d69-e8eb4618df26", "pod": "some-name-rs0-2"} 2025-12-18T11:15:56.366Z DEBUG Pod is ready for physical restore {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-gcp-cs","namespace":"demand-backup-incremental-6143"}, "namespace": "demand-backup-incremental-6143", "name": "restore-backup-gcp-cs", "reconcileID": "dc2d3166-fb62-405b-8d69-e8eb4618df26", "pod": "some-name-rs0-0"} 2025-12-18T11:15:56.366Z DEBUG Pod is ready for physical restore {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-gcp-cs","namespace":"demand-backup-incremental-6143"}, "namespace": "demand-backup-incremental-6143", "name": "restore-backup-gcp-cs", "reconcileID": "dc2d3166-fb62-405b-8d69-e8eb4618df26", "pod": "some-name-rs0-1"} 2025-12-18T11:15:56.366Z DEBUG Statefulset is ready for physical restore {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-gcp-cs","namespace":"demand-backup-incremental-6143"}, "namespace": "demand-backup-incremental-6143", "name": "restore-backup-gcp-cs", "reconcileID": "dc2d3166-fb62-405b-8d69-e8eb4618df26", "sts": "some-name-rs0", "replset": "rs0"} 2025-12-18T11:15:56.366Z DEBUG Check restore status {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": 
"PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-gcp-cs","namespace":"demand-backup-incremental-6143"}, "namespace": "demand-backup-incremental-6143", "name": "restore-backup-gcp-cs", "reconcileID": "dc2d3166-fb62-405b-8d69-e8eb4618df26", "command": ["/opt/percona/pbm", "describe-restore", "2025-12-18T11:12:38.887390155Z", "--config", "/etc/pbm/pbm_config.yaml", "--out", "json"], "pod": "some-name-rs0-0"} 2025-12-18T11:15:56.510Z DEBUG PBM restore status {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-gcp-cs","namespace":"demand-backup-incremental-6143"}, "namespace": "demand-backup-incremental-6143", "name": "restore-backup-gcp-cs", "reconcileID": "dc2d3166-fb62-405b-8d69-e8eb4618df26", "status": {"type":"physical","opid":"","name":"2025-12-18T11:12:38.887390155Z","replsets":[{"name":"rs0","start_ts":0,"status":"","size":0,"size_uncompressed":0,"last_transition_ts":0,"first_write_ts":{"T":0,"I":0},"last_write_ts":{"T":0,"I":0},"node":"","conditions":null}],"compression":"","store":{"type":""},"size":0,"size_uncompressed":0,"mongodb_version":"","fcv":"","start_ts":0,"last_transition_ts":0,"first_write_ts":{"T":0,"I":0},"last_write_ts":{"T":0,"I":0},"hb":{"T":0,"I":0},"status":"","conditions":null,"n":null,"pbm_version":"","balancer":""}} 2025-12-18T11:16:01.511Z DEBUG checking if restore is allowed {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-gcp-cs","namespace":"demand-backup-incremental-6143"}, "namespace": "demand-backup-incremental-6143", "name": "restore-backup-gcp-cs", "reconcileID": "ca8a6cf3-bb13-41ea-99b0-43d3f1eaa1b1", "cluster": "some-name", "namespace": "demand-backup-incremental-6143"} 2025-12-18T11:16:01.512Z DEBUG Pod is ready for physical restore {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-gcp-cs","namespace":"demand-backup-incremental-6143"}, "namespace": "demand-backup-incremental-6143", "name": "restore-backup-gcp-cs", "reconcileID": "ca8a6cf3-bb13-41ea-99b0-43d3f1eaa1b1", "pod": "some-name-rs0-0"} 2025-12-18T11:16:01.512Z DEBUG Pod is ready for physical restore {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-gcp-cs","namespace":"demand-backup-incremental-6143"}, "namespace": "demand-backup-incremental-6143", "name": "restore-backup-gcp-cs", "reconcileID": "ca8a6cf3-bb13-41ea-99b0-43d3f1eaa1b1", "pod": "some-name-rs0-1"} 2025-12-18T11:16:01.512Z DEBUG Pod is ready for physical restore {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-gcp-cs","namespace":"demand-backup-incremental-6143"}, "namespace": "demand-backup-incremental-6143", "name": "restore-backup-gcp-cs", "reconcileID": "ca8a6cf3-bb13-41ea-99b0-43d3f1eaa1b1", "pod": "some-name-rs0-2"} 2025-12-18T11:16:01.512Z DEBUG Statefulset is ready for physical restore {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", 
"PerconaServerMongoDBRestore": {"name":"restore-backup-gcp-cs","namespace":"demand-backup-incremental-6143"}, "namespace": "demand-backup-incremental-6143", "name": "restore-backup-gcp-cs", "reconcileID": "ca8a6cf3-bb13-41ea-99b0-43d3f1eaa1b1", "sts": "some-name-rs0", "replset": "rs0"} 2025-12-18T11:16:01.512Z DEBUG Check restore status {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-gcp-cs","namespace":"demand-backup-incremental-6143"}, "namespace": "demand-backup-incremental-6143", "name": "restore-backup-gcp-cs", "reconcileID": "ca8a6cf3-bb13-41ea-99b0-43d3f1eaa1b1", "command": ["/opt/percona/pbm", "describe-restore", "2025-12-18T11:12:38.887390155Z", "--config", "/etc/pbm/pbm_config.yaml", "--out", "json"], "pod": "some-name-rs0-0"} 2025-12-18T11:16:01.692Z DEBUG PBM restore status {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-gcp-cs","namespace":"demand-backup-incremental-6143"}, "namespace": "demand-backup-incremental-6143", "name": "restore-backup-gcp-cs", "reconcileID": "ca8a6cf3-bb13-41ea-99b0-43d3f1eaa1b1", "status": {"type":"physical","opid":"","name":"2025-12-18T11:12:38.887390155Z","replsets":[{"name":"rs0","start_ts":0,"status":"","size":0,"size_uncompressed":0,"last_transition_ts":0,"first_write_ts":{"T":0,"I":0},"last_write_ts":{"T":0,"I":0},"node":"","conditions":null}],"compression":"","store":{"type":""},"size":0,"size_uncompressed":0,"mongodb_version":"","fcv":"","start_ts":0,"last_transition_ts":0,"first_write_ts":{"T":0,"I":0},"last_write_ts":{"T":0,"I":0},"hb":{"T":0,"I":0},"status":"","conditions":null,"n":null,"pbm_version":"","balancer":""}} 2025-12-18T11:16:06.692Z DEBUG checking if restore is allowed {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-gcp-cs","namespace":"demand-backup-incremental-6143"}, "namespace": "demand-backup-incremental-6143", "name": "restore-backup-gcp-cs", "reconcileID": "1b6198a2-d526-444a-a915-d6c90fe5b414", "cluster": "some-name", "namespace": "demand-backup-incremental-6143"} 2025-12-18T11:16:06.693Z DEBUG Pod is ready for physical restore {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-gcp-cs","namespace":"demand-backup-incremental-6143"}, "namespace": "demand-backup-incremental-6143", "name": "restore-backup-gcp-cs", "reconcileID": "1b6198a2-d526-444a-a915-d6c90fe5b414", "pod": "some-name-rs0-0"} 2025-12-18T11:16:06.693Z DEBUG Pod is ready for physical restore {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-gcp-cs","namespace":"demand-backup-incremental-6143"}, "namespace": "demand-backup-incremental-6143", "name": "restore-backup-gcp-cs", "reconcileID": "1b6198a2-d526-444a-a915-d6c90fe5b414", "pod": "some-name-rs0-1"} 2025-12-18T11:16:06.693Z DEBUG Pod is ready for physical restore {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": 
{"name":"restore-backup-gcp-cs","namespace":"demand-backup-incremental-6143"}, "namespace": "demand-backup-incremental-6143", "name": "restore-backup-gcp-cs", "reconcileID": "1b6198a2-d526-444a-a915-d6c90fe5b414", "pod": "some-name-rs0-2"} 2025-12-18T11:16:06.693Z DEBUG Statefulset is ready for physical restore {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-gcp-cs","namespace":"demand-backup-incremental-6143"}, "namespace": "demand-backup-incremental-6143", "name": "restore-backup-gcp-cs", "reconcileID": "1b6198a2-d526-444a-a915-d6c90fe5b414", "sts": "some-name-rs0", "replset": "rs0"} 2025-12-18T11:16:06.694Z DEBUG Check restore status {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-gcp-cs","namespace":"demand-backup-incremental-6143"}, "namespace": "demand-backup-incremental-6143", "name": "restore-backup-gcp-cs", "reconcileID": "1b6198a2-d526-444a-a915-d6c90fe5b414", "command": ["/opt/percona/pbm", "describe-restore", "2025-12-18T11:12:38.887390155Z", "--config", "/etc/pbm/pbm_config.yaml", "--out", "json"], "pod": "some-name-rs0-0"} 2025-12-18T11:16:06.930Z DEBUG PBM restore status {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-gcp-cs","namespace":"demand-backup-incremental-6143"}, "namespace": "demand-backup-incremental-6143", "name": "restore-backup-gcp-cs", "reconcileID": "1b6198a2-d526-444a-a915-d6c90fe5b414", "status": {"type":"physical","opid":"","name":"2025-12-18T11:12:38.887390155Z","replsets":[{"name":"rs0","start_ts":0,"status":"","size":0,"size_uncompressed":0,"last_transition_ts":0,"first_write_ts":{"T":0,"I":0},"last_write_ts":{"T":0,"I":0},"node":"","conditions":null}],"compression":"","store":{"type":""},"size":0,"size_uncompressed":0,"mongodb_version":"","fcv":"","start_ts":0,"last_transition_ts":0,"first_write_ts":{"T":0,"I":0},"last_write_ts":{"T":0,"I":0},"hb":{"T":0,"I":0},"status":"","conditions":null,"n":null,"pbm_version":"","balancer":""}} 2025-12-18T11:16:11.931Z DEBUG checking if restore is allowed {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-gcp-cs","namespace":"demand-backup-incremental-6143"}, "namespace": "demand-backup-incremental-6143", "name": "restore-backup-gcp-cs", "reconcileID": "afe633c1-1544-4728-9dc1-bbe7329a3c7f", "cluster": "some-name", "namespace": "demand-backup-incremental-6143"} 2025-12-18T11:16:11.932Z DEBUG Pod is ready for physical restore {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-gcp-cs","namespace":"demand-backup-incremental-6143"}, "namespace": "demand-backup-incremental-6143", "name": "restore-backup-gcp-cs", "reconcileID": "afe633c1-1544-4728-9dc1-bbe7329a3c7f", "pod": "some-name-rs0-0"} 2025-12-18T11:16:11.932Z DEBUG Pod is ready for physical restore {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": 
{"name":"restore-backup-gcp-cs","namespace":"demand-backup-incremental-6143"}, "namespace": "demand-backup-incremental-6143", "name": "restore-backup-gcp-cs", "reconcileID": "afe633c1-1544-4728-9dc1-bbe7329a3c7f", "pod": "some-name-rs0-1"} 2025-12-18T11:16:11.932Z DEBUG Pod is ready for physical restore {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-gcp-cs","namespace":"demand-backup-incremental-6143"}, "namespace": "demand-backup-incremental-6143", "name": "restore-backup-gcp-cs", "reconcileID": "afe633c1-1544-4728-9dc1-bbe7329a3c7f", "pod": "some-name-rs0-2"} 2025-12-18T11:16:11.932Z DEBUG Statefulset is ready for physical restore {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-gcp-cs","namespace":"demand-backup-incremental-6143"}, "namespace": "demand-backup-incremental-6143", "name": "restore-backup-gcp-cs", "reconcileID": "afe633c1-1544-4728-9dc1-bbe7329a3c7f", "sts": "some-name-rs0", "replset": "rs0"} 2025-12-18T11:16:11.932Z DEBUG Check restore status {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-gcp-cs","namespace":"demand-backup-incremental-6143"}, "namespace": "demand-backup-incremental-6143", "name": "restore-backup-gcp-cs", "reconcileID": "afe633c1-1544-4728-9dc1-bbe7329a3c7f", "command": ["/opt/percona/pbm", "describe-restore", "2025-12-18T11:12:38.887390155Z", "--config", "/etc/pbm/pbm_config.yaml", "--out", "json"], "pod": "some-name-rs0-0"} 2025-12-18T11:16:12.097Z DEBUG PBM restore status {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-gcp-cs","namespace":"demand-backup-incremental-6143"}, "namespace": "demand-backup-incremental-6143", "name": "restore-backup-gcp-cs", "reconcileID": "afe633c1-1544-4728-9dc1-bbe7329a3c7f", "status": {"type":"physical","opid":"","name":"2025-12-18T11:12:38.887390155Z","replsets":[{"name":"rs0","start_ts":0,"status":"","size":0,"size_uncompressed":0,"last_transition_ts":0,"first_write_ts":{"T":0,"I":0},"last_write_ts":{"T":0,"I":0},"node":"","conditions":null}],"compression":"","store":{"type":""},"size":0,"size_uncompressed":0,"mongodb_version":"","fcv":"","start_ts":0,"last_transition_ts":0,"first_write_ts":{"T":0,"I":0},"last_write_ts":{"T":0,"I":0},"hb":{"T":0,"I":0},"status":"","conditions":null,"n":null,"pbm_version":"","balancer":""}} 2025-12-18T11:16:17.098Z DEBUG checking if restore is allowed {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-gcp-cs","namespace":"demand-backup-incremental-6143"}, "namespace": "demand-backup-incremental-6143", "name": "restore-backup-gcp-cs", "reconcileID": "71021859-6263-407d-9157-17691cd01ac2", "cluster": "some-name", "namespace": "demand-backup-incremental-6143"} 2025-12-18T11:16:17.099Z DEBUG Pod is ready for physical restore {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": 
{"name":"restore-backup-gcp-cs","namespace":"demand-backup-incremental-6143"}, "namespace": "demand-backup-incremental-6143", "name": "restore-backup-gcp-cs", "reconcileID": "71021859-6263-407d-9157-17691cd01ac2", "pod": "some-name-rs0-0"} 2025-12-18T11:16:17.099Z DEBUG Pod is ready for physical restore {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-gcp-cs","namespace":"demand-backup-incremental-6143"}, "namespace": "demand-backup-incremental-6143", "name": "restore-backup-gcp-cs", "reconcileID": "71021859-6263-407d-9157-17691cd01ac2", "pod": "some-name-rs0-1"} 2025-12-18T11:16:17.099Z DEBUG Pod is ready for physical restore {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-gcp-cs","namespace":"demand-backup-incremental-6143"}, "namespace": "demand-backup-incremental-6143", "name": "restore-backup-gcp-cs", "reconcileID": "71021859-6263-407d-9157-17691cd01ac2", "pod": "some-name-rs0-2"} 2025-12-18T11:16:17.099Z DEBUG Statefulset is ready for physical restore {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-gcp-cs","namespace":"demand-backup-incremental-6143"}, "namespace": "demand-backup-incremental-6143", "name": "restore-backup-gcp-cs", "reconcileID": "71021859-6263-407d-9157-17691cd01ac2", "sts": "some-name-rs0", "replset": "rs0"} 2025-12-18T11:16:17.099Z DEBUG Check restore status {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-gcp-cs","namespace":"demand-backup-incremental-6143"}, "namespace": "demand-backup-incremental-6143", "name": "restore-backup-gcp-cs", "reconcileID": "71021859-6263-407d-9157-17691cd01ac2", "command": ["/opt/percona/pbm", "describe-restore", "2025-12-18T11:12:38.887390155Z", "--config", "/etc/pbm/pbm_config.yaml", "--out", "json"], "pod": "some-name-rs0-0"} 2025-12-18T11:16:17.275Z DEBUG PBM restore status {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-gcp-cs","namespace":"demand-backup-incremental-6143"}, "namespace": "demand-backup-incremental-6143", "name": "restore-backup-gcp-cs", "reconcileID": "71021859-6263-407d-9157-17691cd01ac2", "status": {"type":"physical","opid":"","name":"2025-12-18T11:12:38.887390155Z","replsets":[{"name":"rs0","start_ts":0,"status":"","size":0,"size_uncompressed":0,"last_transition_ts":0,"first_write_ts":{"T":0,"I":0},"last_write_ts":{"T":0,"I":0},"node":"","conditions":null}],"compression":"","store":{"type":""},"size":0,"size_uncompressed":0,"mongodb_version":"","fcv":"","start_ts":0,"last_transition_ts":0,"first_write_ts":{"T":0,"I":0},"last_write_ts":{"T":0,"I":0},"hb":{"T":0,"I":0},"status":"","conditions":null,"n":null,"pbm_version":"","balancer":""}} 2025-12-18T11:16:22.277Z DEBUG checking if restore is allowed {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-gcp-cs","namespace":"demand-backup-incremental-6143"}, 
"namespace": "demand-backup-incremental-6143", "name": "restore-backup-gcp-cs", "reconcileID": "b0573db5-b398-45e0-919e-f0f51e3f33da", "cluster": "some-name", "namespace": "demand-backup-incremental-6143"} 2025-12-18T11:16:22.277Z DEBUG Pod is ready for physical restore {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-gcp-cs","namespace":"demand-backup-incremental-6143"}, "namespace": "demand-backup-incremental-6143", "name": "restore-backup-gcp-cs", "reconcileID": "b0573db5-b398-45e0-919e-f0f51e3f33da", "pod": "some-name-rs0-2"} 2025-12-18T11:16:22.277Z DEBUG Pod is ready for physical restore {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-gcp-cs","namespace":"demand-backup-incremental-6143"}, "namespace": "demand-backup-incremental-6143", "name": "restore-backup-gcp-cs", "reconcileID": "b0573db5-b398-45e0-919e-f0f51e3f33da", "pod": "some-name-rs0-0"} 2025-12-18T11:16:22.277Z DEBUG Pod is ready for physical restore {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-gcp-cs","namespace":"demand-backup-incremental-6143"}, "namespace": "demand-backup-incremental-6143", "name": "restore-backup-gcp-cs", "reconcileID": "b0573db5-b398-45e0-919e-f0f51e3f33da", "pod": "some-name-rs0-1"} 2025-12-18T11:16:22.277Z DEBUG Statefulset is ready for physical restore {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-gcp-cs","namespace":"demand-backup-incremental-6143"}, "namespace": "demand-backup-incremental-6143", "name": "restore-backup-gcp-cs", "reconcileID": "b0573db5-b398-45e0-919e-f0f51e3f33da", "sts": "some-name-rs0", "replset": "rs0"} 2025-12-18T11:16:22.277Z DEBUG Check restore status {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-gcp-cs","namespace":"demand-backup-incremental-6143"}, "namespace": "demand-backup-incremental-6143", "name": "restore-backup-gcp-cs", "reconcileID": "b0573db5-b398-45e0-919e-f0f51e3f33da", "command": ["/opt/percona/pbm", "describe-restore", "2025-12-18T11:12:38.887390155Z", "--config", "/etc/pbm/pbm_config.yaml", "--out", "json"], "pod": "some-name-rs0-0"} 2025-12-18T11:16:22.428Z DEBUG PBM restore status {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-gcp-cs","namespace":"demand-backup-incremental-6143"}, "namespace": "demand-backup-incremental-6143", "name": "restore-backup-gcp-cs", "reconcileID": "b0573db5-b398-45e0-919e-f0f51e3f33da", "status": 
{"type":"physical","opid":"","name":"2025-12-18T11:12:38.887390155Z","replsets":[{"name":"rs0","start_ts":0,"status":"","size":0,"size_uncompressed":0,"last_transition_ts":0,"first_write_ts":{"T":0,"I":0},"last_write_ts":{"T":0,"I":0},"node":"","conditions":null}],"compression":"","store":{"type":""},"size":0,"size_uncompressed":0,"mongodb_version":"","fcv":"","start_ts":0,"last_transition_ts":0,"first_write_ts":{"T":0,"I":0},"last_write_ts":{"T":0,"I":0},"hb":{"T":0,"I":0},"status":"","conditions":null,"n":null,"pbm_version":"","balancer":""}} 2025-12-18T11:16:27.429Z DEBUG checking if restore is allowed {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-gcp-cs","namespace":"demand-backup-incremental-6143"}, "namespace": "demand-backup-incremental-6143", "name": "restore-backup-gcp-cs", "reconcileID": "e7d5291e-7b20-455d-bdcd-44c0c30eb771", "cluster": "some-name", "namespace": "demand-backup-incremental-6143"} 2025-12-18T11:16:27.430Z DEBUG Pod is ready for physical restore {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-gcp-cs","namespace":"demand-backup-incremental-6143"}, "namespace": "demand-backup-incremental-6143", "name": "restore-backup-gcp-cs", "reconcileID": "e7d5291e-7b20-455d-bdcd-44c0c30eb771", "pod": "some-name-rs0-0"} 2025-12-18T11:16:27.430Z DEBUG Pod is ready for physical restore {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-gcp-cs","namespace":"demand-backup-incremental-6143"}, "namespace": "demand-backup-incremental-6143", "name": "restore-backup-gcp-cs", "reconcileID": "e7d5291e-7b20-455d-bdcd-44c0c30eb771", "pod": "some-name-rs0-1"} 2025-12-18T11:16:27.430Z DEBUG Pod is ready for physical restore {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-gcp-cs","namespace":"demand-backup-incremental-6143"}, "namespace": "demand-backup-incremental-6143", "name": "restore-backup-gcp-cs", "reconcileID": "e7d5291e-7b20-455d-bdcd-44c0c30eb771", "pod": "some-name-rs0-2"} 2025-12-18T11:16:27.430Z DEBUG Statefulset is ready for physical restore {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-gcp-cs","namespace":"demand-backup-incremental-6143"}, "namespace": "demand-backup-incremental-6143", "name": "restore-backup-gcp-cs", "reconcileID": "e7d5291e-7b20-455d-bdcd-44c0c30eb771", "sts": "some-name-rs0", "replset": "rs0"} 2025-12-18T11:16:27.430Z DEBUG Check restore status {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-gcp-cs","namespace":"demand-backup-incremental-6143"}, "namespace": "demand-backup-incremental-6143", "name": "restore-backup-gcp-cs", "reconcileID": "e7d5291e-7b20-455d-bdcd-44c0c30eb771", "command": ["/opt/percona/pbm", "describe-restore", "2025-12-18T11:12:38.887390155Z", "--config", "/etc/pbm/pbm_config.yaml", "--out", "json"], "pod": "some-name-rs0-0"} 
2025-12-18T11:16:27.592Z DEBUG PBM restore status {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-gcp-cs","namespace":"demand-backup-incremental-6143"}, "namespace": "demand-backup-incremental-6143", "name": "restore-backup-gcp-cs", "reconcileID": "e7d5291e-7b20-455d-bdcd-44c0c30eb771", "status": {"type":"physical","opid":"","name":"2025-12-18T11:12:38.887390155Z","replsets":[{"name":"rs0","start_ts":0,"status":"","size":0,"size_uncompressed":0,"last_transition_ts":0,"first_write_ts":{"T":0,"I":0},"last_write_ts":{"T":0,"I":0},"node":"","conditions":null}],"compression":"","store":{"type":""},"size":0,"size_uncompressed":0,"mongodb_version":"","fcv":"","start_ts":0,"last_transition_ts":0,"first_write_ts":{"T":0,"I":0},"last_write_ts":{"T":0,"I":0},"hb":{"T":0,"I":0},"status":"","conditions":null,"n":null,"pbm_version":"","balancer":""}} 2025-12-18T11:16:32.593Z DEBUG checking if restore is allowed {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-gcp-cs","namespace":"demand-backup-incremental-6143"}, "namespace": "demand-backup-incremental-6143", "name": "restore-backup-gcp-cs", "reconcileID": "e9c70e0d-91ee-40b0-8300-d4e76ccda236", "cluster": "some-name", "namespace": "demand-backup-incremental-6143"} 2025-12-18T11:16:32.594Z DEBUG Pod is ready for physical restore {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-gcp-cs","namespace":"demand-backup-incremental-6143"}, "namespace": "demand-backup-incremental-6143", "name": "restore-backup-gcp-cs", "reconcileID": "e9c70e0d-91ee-40b0-8300-d4e76ccda236", "pod": "some-name-rs0-0"} 2025-12-18T11:16:32.594Z DEBUG Pod is ready for physical restore {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-gcp-cs","namespace":"demand-backup-incremental-6143"}, "namespace": "demand-backup-incremental-6143", "name": "restore-backup-gcp-cs", "reconcileID": "e9c70e0d-91ee-40b0-8300-d4e76ccda236", "pod": "some-name-rs0-1"} 2025-12-18T11:16:32.594Z DEBUG Pod is ready for physical restore {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-gcp-cs","namespace":"demand-backup-incremental-6143"}, "namespace": "demand-backup-incremental-6143", "name": "restore-backup-gcp-cs", "reconcileID": "e9c70e0d-91ee-40b0-8300-d4e76ccda236", "pod": "some-name-rs0-2"} 2025-12-18T11:16:32.594Z DEBUG Statefulset is ready for physical restore {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-gcp-cs","namespace":"demand-backup-incremental-6143"}, "namespace": "demand-backup-incremental-6143", "name": "restore-backup-gcp-cs", "reconcileID": "e9c70e0d-91ee-40b0-8300-d4e76ccda236", "sts": "some-name-rs0", "replset": "rs0"} 2025-12-18T11:16:32.594Z DEBUG Check restore status {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": 
"PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-gcp-cs","namespace":"demand-backup-incremental-6143"}, "namespace": "demand-backup-incremental-6143", "name": "restore-backup-gcp-cs", "reconcileID": "e9c70e0d-91ee-40b0-8300-d4e76ccda236", "command": ["/opt/percona/pbm", "describe-restore", "2025-12-18T11:12:38.887390155Z", "--config", "/etc/pbm/pbm_config.yaml", "--out", "json"], "pod": "some-name-rs0-0"} 2025-12-18T11:16:32.790Z DEBUG PBM restore status {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-gcp-cs","namespace":"demand-backup-incremental-6143"}, "namespace": "demand-backup-incremental-6143", "name": "restore-backup-gcp-cs", "reconcileID": "e9c70e0d-91ee-40b0-8300-d4e76ccda236", "status": {"type":"physical","opid":"","name":"2025-12-18T11:12:38.887390155Z","replsets":[{"name":"rs0","start_ts":0,"status":"","size":0,"size_uncompressed":0,"last_transition_ts":0,"first_write_ts":{"T":0,"I":0},"last_write_ts":{"T":0,"I":0},"node":"","conditions":null}],"compression":"","store":{"type":""},"size":0,"size_uncompressed":0,"mongodb_version":"","fcv":"","start_ts":0,"last_transition_ts":0,"first_write_ts":{"T":0,"I":0},"last_write_ts":{"T":0,"I":0},"hb":{"T":0,"I":0},"status":"","conditions":null,"n":null,"pbm_version":"","balancer":""}} 2025-12-18T11:16:37.790Z DEBUG checking if restore is allowed {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-gcp-cs","namespace":"demand-backup-incremental-6143"}, "namespace": "demand-backup-incremental-6143", "name": "restore-backup-gcp-cs", "reconcileID": "4c894931-23ca-4658-9ace-fd177dda413b", "cluster": "some-name", "namespace": "demand-backup-incremental-6143"} 2025-12-18T11:16:37.791Z DEBUG Pod is ready for physical restore {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-gcp-cs","namespace":"demand-backup-incremental-6143"}, "namespace": "demand-backup-incremental-6143", "name": "restore-backup-gcp-cs", "reconcileID": "4c894931-23ca-4658-9ace-fd177dda413b", "pod": "some-name-rs0-2"} 2025-12-18T11:16:37.791Z DEBUG Pod is ready for physical restore {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-gcp-cs","namespace":"demand-backup-incremental-6143"}, "namespace": "demand-backup-incremental-6143", "name": "restore-backup-gcp-cs", "reconcileID": "4c894931-23ca-4658-9ace-fd177dda413b", "pod": "some-name-rs0-0"} 2025-12-18T11:16:37.791Z DEBUG Pod is ready for physical restore {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-gcp-cs","namespace":"demand-backup-incremental-6143"}, "namespace": "demand-backup-incremental-6143", "name": "restore-backup-gcp-cs", "reconcileID": "4c894931-23ca-4658-9ace-fd177dda413b", "pod": "some-name-rs0-1"} 2025-12-18T11:16:37.791Z DEBUG Statefulset is ready for physical restore {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", 
"PerconaServerMongoDBRestore": {"name":"restore-backup-gcp-cs","namespace":"demand-backup-incremental-6143"}, "namespace": "demand-backup-incremental-6143", "name": "restore-backup-gcp-cs", "reconcileID": "4c894931-23ca-4658-9ace-fd177dda413b", "sts": "some-name-rs0", "replset": "rs0"} 2025-12-18T11:16:37.791Z DEBUG Check restore status {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-gcp-cs","namespace":"demand-backup-incremental-6143"}, "namespace": "demand-backup-incremental-6143", "name": "restore-backup-gcp-cs", "reconcileID": "4c894931-23ca-4658-9ace-fd177dda413b", "command": ["/opt/percona/pbm", "describe-restore", "2025-12-18T11:12:38.887390155Z", "--config", "/etc/pbm/pbm_config.yaml", "--out", "json"], "pod": "some-name-rs0-0"} 2025-12-18T11:16:37.985Z DEBUG PBM restore status {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-gcp-cs","namespace":"demand-backup-incremental-6143"}, "namespace": "demand-backup-incremental-6143", "name": "restore-backup-gcp-cs", "reconcileID": "4c894931-23ca-4658-9ace-fd177dda413b", "status": {"type":"physical","opid":"","name":"2025-12-18T11:12:38.887390155Z","replsets":[{"name":"rs0","start_ts":0,"status":"","size":0,"size_uncompressed":0,"last_transition_ts":0,"first_write_ts":{"T":0,"I":0},"last_write_ts":{"T":0,"I":0},"node":"","conditions":null}],"compression":"","store":{"type":""},"size":0,"size_uncompressed":0,"mongodb_version":"","fcv":"","start_ts":0,"last_transition_ts":0,"first_write_ts":{"T":0,"I":0},"last_write_ts":{"T":0,"I":0},"hb":{"T":0,"I":0},"status":"","conditions":null,"n":null,"pbm_version":"","balancer":""}} 2025-12-18T11:16:42.986Z DEBUG checking if restore is allowed {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-gcp-cs","namespace":"demand-backup-incremental-6143"}, "namespace": "demand-backup-incremental-6143", "name": "restore-backup-gcp-cs", "reconcileID": "d1c2c33c-d121-4d01-bfc1-edea5cc44b5b", "cluster": "some-name", "namespace": "demand-backup-incremental-6143"} 2025-12-18T11:16:42.987Z DEBUG Pod is ready for physical restore {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-gcp-cs","namespace":"demand-backup-incremental-6143"}, "namespace": "demand-backup-incremental-6143", "name": "restore-backup-gcp-cs", "reconcileID": "d1c2c33c-d121-4d01-bfc1-edea5cc44b5b", "pod": "some-name-rs0-0"} 2025-12-18T11:16:42.987Z DEBUG Pod is ready for physical restore {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-gcp-cs","namespace":"demand-backup-incremental-6143"}, "namespace": "demand-backup-incremental-6143", "name": "restore-backup-gcp-cs", "reconcileID": "d1c2c33c-d121-4d01-bfc1-edea5cc44b5b", "pod": "some-name-rs0-1"} 2025-12-18T11:16:42.987Z DEBUG Pod is ready for physical restore {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": 
{"name":"restore-backup-gcp-cs","namespace":"demand-backup-incremental-6143"}, "namespace": "demand-backup-incremental-6143", "name": "restore-backup-gcp-cs", "reconcileID": "d1c2c33c-d121-4d01-bfc1-edea5cc44b5b", "pod": "some-name-rs0-2"} 2025-12-18T11:16:42.987Z DEBUG Statefulset is ready for physical restore {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-gcp-cs","namespace":"demand-backup-incremental-6143"}, "namespace": "demand-backup-incremental-6143", "name": "restore-backup-gcp-cs", "reconcileID": "d1c2c33c-d121-4d01-bfc1-edea5cc44b5b", "sts": "some-name-rs0", "replset": "rs0"} 2025-12-18T11:16:42.987Z DEBUG Check restore status {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-gcp-cs","namespace":"demand-backup-incremental-6143"}, "namespace": "demand-backup-incremental-6143", "name": "restore-backup-gcp-cs", "reconcileID": "d1c2c33c-d121-4d01-bfc1-edea5cc44b5b", "command": ["/opt/percona/pbm", "describe-restore", "2025-12-18T11:12:38.887390155Z", "--config", "/etc/pbm/pbm_config.yaml", "--out", "json"], "pod": "some-name-rs0-0"} 2025-12-18T11:16:43.175Z DEBUG PBM restore status {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-gcp-cs","namespace":"demand-backup-incremental-6143"}, "namespace": "demand-backup-incremental-6143", "name": "restore-backup-gcp-cs", "reconcileID": "d1c2c33c-d121-4d01-bfc1-edea5cc44b5b", "status": {"type":"physical","opid":"","name":"2025-12-18T11:12:38.887390155Z","replsets":[{"name":"rs0","start_ts":0,"status":"","size":0,"size_uncompressed":0,"last_transition_ts":0,"first_write_ts":{"T":0,"I":0},"last_write_ts":{"T":0,"I":0},"node":"","conditions":null}],"compression":"","store":{"type":""},"size":0,"size_uncompressed":0,"mongodb_version":"","fcv":"","start_ts":0,"last_transition_ts":0,"first_write_ts":{"T":0,"I":0},"last_write_ts":{"T":0,"I":0},"hb":{"T":0,"I":0},"status":"","conditions":null,"n":null,"pbm_version":"","balancer":""}} 2025-12-18T11:16:48.176Z DEBUG checking if restore is allowed {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-gcp-cs","namespace":"demand-backup-incremental-6143"}, "namespace": "demand-backup-incremental-6143", "name": "restore-backup-gcp-cs", "reconcileID": "759dcd33-193d-469b-ba87-709cd34ef2be", "cluster": "some-name", "namespace": "demand-backup-incremental-6143"} 2025-12-18T11:16:48.176Z DEBUG Pod is ready for physical restore {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-gcp-cs","namespace":"demand-backup-incremental-6143"}, "namespace": "demand-backup-incremental-6143", "name": "restore-backup-gcp-cs", "reconcileID": "759dcd33-193d-469b-ba87-709cd34ef2be", "pod": "some-name-rs0-2"} 2025-12-18T11:16:48.176Z DEBUG Pod is ready for physical restore {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": 
{"name":"restore-backup-gcp-cs","namespace":"demand-backup-incremental-6143"}, "namespace": "demand-backup-incremental-6143", "name": "restore-backup-gcp-cs", "reconcileID": "759dcd33-193d-469b-ba87-709cd34ef2be", "pod": "some-name-rs0-0"} 2025-12-18T11:16:48.176Z DEBUG Pod is ready for physical restore {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-gcp-cs","namespace":"demand-backup-incremental-6143"}, "namespace": "demand-backup-incremental-6143", "name": "restore-backup-gcp-cs", "reconcileID": "759dcd33-193d-469b-ba87-709cd34ef2be", "pod": "some-name-rs0-1"} 2025-12-18T11:16:48.176Z DEBUG Statefulset is ready for physical restore {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-gcp-cs","namespace":"demand-backup-incremental-6143"}, "namespace": "demand-backup-incremental-6143", "name": "restore-backup-gcp-cs", "reconcileID": "759dcd33-193d-469b-ba87-709cd34ef2be", "sts": "some-name-rs0", "replset": "rs0"} 2025-12-18T11:16:48.176Z DEBUG Check restore status {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-gcp-cs","namespace":"demand-backup-incremental-6143"}, "namespace": "demand-backup-incremental-6143", "name": "restore-backup-gcp-cs", "reconcileID": "759dcd33-193d-469b-ba87-709cd34ef2be", "command": ["/opt/percona/pbm", "describe-restore", "2025-12-18T11:12:38.887390155Z", "--config", "/etc/pbm/pbm_config.yaml", "--out", "json"], "pod": "some-name-rs0-0"} 2025-12-18T11:16:48.378Z DEBUG PBM restore status {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-gcp-cs","namespace":"demand-backup-incremental-6143"}, "namespace": "demand-backup-incremental-6143", "name": "restore-backup-gcp-cs", "reconcileID": "759dcd33-193d-469b-ba87-709cd34ef2be", "status": {"type":"physical","opid":"","name":"2025-12-18T11:12:38.887390155Z","replsets":[{"name":"rs0","start_ts":0,"status":"error","size":0,"size_uncompressed":0,"last_transition_ts":1766056604,"first_write_ts":{"T":0,"I":0},"last_write_ts":{"T":0,"I":0},"node":"","error":"wait for nodes in rs: check heartbeat in .pbm.restore/2025-12-18T11:12:38.887390155Z/rs.rs0/node.some-name-rs0-1.some-name-rs0.demand-backup-incremental-6143.svc.cluster.local:27017.hb: stuck, last beat ts: 1766056359","conditions":null}],"compression":"","store":{"type":""},"size":0,"size_uncompressed":0,"mongodb_version":"","fcv":"","start_ts":0,"last_transition_ts":1766056605,"first_write_ts":{"T":0,"I":0},"last_write_ts":{"T":0,"I":0},"hb":{"T":0,"I":0},"status":"error","conditions":null,"n":null,"error":"move to running state: wait for cluster: cluster failed: 1766056604:move to running state: wait for nodes in rs: check heartbeat in .pbm.restore/2025-12-18T11:12:38.887390155Z/rs.rs0/node.some-name-rs0-1.some-name-rs0.demand-backup-incremental-6143.svc.cluster.local:27017.hb: stuck, last beat ts: 1766056359","pbm_version":"","balancer":""}} 2025-12-18T11:16:48.378Z INFO Restore state changed {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", 
"PerconaServerMongoDBRestore": {"name":"restore-backup-gcp-cs","namespace":"demand-backup-incremental-6143"}, "namespace": "demand-backup-incremental-6143", "name": "restore-backup-gcp-cs", "reconcileID": "759dcd33-193d-469b-ba87-709cd34ef2be", "previous": "requested", "current": "error"} apiVersion: psmdb.percona.com/v1 kind: PerconaServerMongoDBRestore metadata: annotations: kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"psmdb.percona.com/v1","kind":"PerconaServerMongoDBRestore","metadata":{"annotations":{},"name":"restore-backup-gcp-cs","namespace":"demand-backup-incremental-6143"},"spec":{"backupName":"backup-gcp-cs","clusterName":"some-name"}} creationTimestamp: "2025-12-18T11:11:05Z" generation: 1 name: restore-backup-gcp-cs namespace: demand-backup-incremental-6143 resourceVersion: "1766056608392639024" uid: cdc26b73-1a49-43ae-9044-d69c6b0b076e spec: backupName: backup-gcp-cs clusterName: some-name status: error: 'move to running state: wait for cluster: cluster failed: 1766056604:move to running state: wait for nodes in rs: check heartbeat in .pbm.restore/2025-12-18T11:12:38.887390155Z/rs.rs0/node.some-name-rs0-1.some-name-rs0.demand-backup-incremental-6143.svc.cluster.local:27017.hb: stuck, last beat ts: 1766056359' pbmName: "2025-12-18T11:12:38.887390155Z" state: error Name: restore-backup-gcp-cs Namespace: demand-backup-incremental-6143 Labels: Annotations: API Version: psmdb.percona.com/v1 Kind: PerconaServerMongoDBRestore Metadata: Creation Timestamp: 2025-12-18T11:11:05Z Generation: 1 Resource Version: 1766056608392639024 UID: cdc26b73-1a49-43ae-9044-d69c6b0b076e Spec: Backup Name: backup-gcp-cs Cluster Name: some-name Status: Error: move to running state: wait for cluster: cluster failed: 1766056604:move to running state: wait for nodes in rs: check heartbeat in .pbm.restore/2025-12-18T11:12:38.887390155Z/rs.rs0/node.some-name-rs0-1.some-name-rs0.demand-backup-incremental-6143.svc.cluster.local:27017.hb: stuck, last beat ts: 1766056359 Pbm Name: 2025-12-18T11:12:38.887390155Z State: error Events: Restore object restore-backup-gcp-cs is in error state after 5 minutes. something went wrong with operator or kubernetes cluster