Log: /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2247/e2e-tests/logs/pitr-physical-backup-source.log Warning: version difference between client (1.35) and server (1.32) exceeds the supported minor version skew of +/-1 Warning: version difference between client (1.35) and server (1.32) exceeds the supported minor version skew of +/-1 Warning: version difference between client (1.35) and server (1.32) exceeds the supported minor version skew of +/-1 + main + create_infra pitr-physical-backup-source-22129 + local ns=pitr-physical-backup-source-22129 + [[ 1 == 1 ]] + delete_crd + desc 'get and delete old CRDs and RBAC' + set +o xtrace ----------------------------------------------------------------------------------- get and delete old CRDs and RBAC ----------------------------------------------------------------------------------- + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2247/deploy/crd.yaml --ignore-not-found --wait=false ++ mktemp + local LAST_OUT=/tmp/tmp.K5tRjkJrOp ++ mktemp + local LAST_ERR=/tmp/tmp.25xy5A7cCL + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2247/deploy/crd.yaml --ignore-not-found --wait=false + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.K5tRjkJrOp customresourcedefinition.apiextensions.k8s.io "perconaservermongodbbackups.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbrestores.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbs.psmdb.percona.com" deleted + cat /tmp/tmp.25xy5A7cCL + rm /tmp/tmp.K5tRjkJrOp /tmp/tmp.25xy5A7cCL + return 0 ++ yq eval .metadata.name /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2247/deploy/crd.yaml ++ grep -v '\-\-\-' grep: warning: stray \ before - grep: warning: stray \ before - + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + kubectl get perconaservermongodbbackups.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbbackups.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbbackups" + kubectl patch perconaservermongodbbackups.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbbackups" + : + kubectl_bin wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.3Zx3ubTnID ++ mktemp + local LAST_ERR=/tmp/tmp.YmwgLwe3Bl + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.3Zx3ubTnID + cat /tmp/tmp.YmwgLwe3Bl + rm /tmp/tmp.3Zx3ubTnID /tmp/tmp.YmwgLwe3Bl + return 0 + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + grep -v NAMESPACE + kubectl get perconaservermongodbrestores.psmdb.percona.com --all-namespaces -o wide + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbrestores.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbrestores" + kubectl patch perconaservermongodbrestores.psmdb.percona.com -n sh --type=merge -p 
'{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbrestores" + : + kubectl_bin wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.cEFz2IYft5 ++ mktemp + local LAST_ERR=/tmp/tmp.CbelT94bal + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.cEFz2IYft5 + cat /tmp/tmp.CbelT94bal + rm /tmp/tmp.cEFz2IYft5 /tmp/tmp.CbelT94bal + return 0 + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + kubectl get perconaservermongodbs.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbs.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbs" + kubectl patch perconaservermongodbs.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbs" + : + kubectl_bin wait --for=delete crd perconaservermongodbs.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.VkI9GWJFJI ++ mktemp + local LAST_ERR=/tmp/tmp.LVSiWDOmbz + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbs.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.VkI9GWJFJI + cat /tmp/tmp.LVSiWDOmbz + rm /tmp/tmp.VkI9GWJFJI /tmp/tmp.LVSiWDOmbz + return 0 + local rbac_yaml=rbac.yaml + '[' -n psmdb-operator ']' + rbac_yaml=cw-rbac.yaml + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2247/deploy/cw-rbac.yaml --ignore-not-found ++ mktemp + local LAST_OUT=/tmp/tmp.8ThEdK7vBg ++ mktemp + local LAST_ERR=/tmp/tmp.Xu0CHxuhVs + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2247/deploy/cw-rbac.yaml --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.8ThEdK7vBg clusterrole.rbac.authorization.k8s.io "percona-server-mongodb-operator" deleted clusterrolebinding.rbac.authorization.k8s.io "service-account-percona-server-mongodb-operator" deleted + cat /tmp/tmp.Xu0CHxuhVs + rm /tmp/tmp.8ThEdK7vBg /tmp/tmp.Xu0CHxuhVs + return 0 + check_crd_for_deletion PR-2247-091270b4 + local git_tag=PR-2247-091270b4 ++ curl -s https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/PR-2247-091270b4/deploy/crd.yaml ++ yq eval .metadata.name ++ /usr/sbin/sed s/---//g ++ /usr/sbin/sed ':a;N;$!ba;s/\n/ /g' + for crd_name in $(curl -s https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/${git_tag}/deploy/crd.yaml | yq eval '.metadata.name' | $sed 's/---//g' | $sed ':a;N;$!ba;s/\n/ /g') ++ kubectl_bin get crd/null -o 'jsonpath={.status.conditions[-1].type}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.041GBKNNBD +++ mktemp ++ local LAST_ERR=/tmp/tmp.yBi7cSsMsu ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}' ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.041GBKNNBD ++ cat /tmp/tmp.yBi7cSsMsu Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ 
sleep 0 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}' ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.041GBKNNBD ++ cat /tmp/tmp.yBi7cSsMsu Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ sleep 4 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}' ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.041GBKNNBD ++ cat /tmp/tmp.yBi7cSsMsu Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ sleep 8 ++ cat /tmp/tmp.041GBKNNBD ++ cat /tmp/tmp.yBi7cSsMsu Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ rm /tmp/tmp.041GBKNNBD /tmp/tmp.yBi7cSsMsu ++ return 1 + [[ '' == \T\e\r\m\i\n\a\t\i\n\g ]] + '[' -n psmdb-operator ']' + create_namespace psmdb-operator + local namespace=psmdb-operator + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + destroy_chaos_mesh ++ helm list --all-namespaces --filter chaos-mesh ++ tail -n1 ++ awk '-F ' '{print $2}' ++ sed s/NAMESPACE// + local chaos_mesh_ns= + desc 'destroy chaos-mesh' + set +o xtrace ----------------------------------------------------------------------------------- destroy chaos-mesh ----------------------------------------------------------------------------------- + '[' -n '' ']' ++ kubectl get MutatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete MutatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep validate-auth ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl api-resources ++ grep chaos-mesh ++ awk '{print $1}' ++ kubectl get crd ++ grep chaos-mesh.org ++ awk '{print $1}' + timeout 30 kubectl delete crd error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrolebinding ++ awk '{print $1}' ++ grep chaos-mesh + timeout 30 kubectl delete clusterrolebinding error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrole ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrole error: resource(s) were provided, but no name was specified + : + desc 'cleaned up all old namespaces' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up all old namespaces ----------------------------------------------------------------------------------- + '[' -n '' ']' + awk '{print$1}' + kubectl_bin get ns + xargs kubectl delete ns + desc 'cleaned up old namespaces psmdb-operator' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up old namespaces psmdb-operator ----------------------------------------------------------------------------------- + kubectl_bin delete namespace psmdb-operator --ignore-not-found + grep -E -v '^kube-|^default|Terminating|psmdb-operator|openshift|^gke-|^gmp-|^NAME' ++ mktemp ++ mktemp + local LAST_OUT=/tmp/tmp.wEZlEgSIrn + local LAST_OUT=/tmp/tmp.RyEyJFpWmO ++ mktemp ++ mktemp + local LAST_ERR=/tmp/tmp.6jChSwrM13 + local 
exit_status=0 + local timeout=4 ++ seq 0 2 + local LAST_ERR=/tmp/tmp.HaZeEOyXGQ + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete namespace psmdb-operator --ignore-not-found + for i in $(seq 0 2) + set +e + kubectl get ns + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.RyEyJFpWmO + cat /tmp/tmp.HaZeEOyXGQ + rm /tmp/tmp.RyEyJFpWmO /tmp/tmp.HaZeEOyXGQ + return 0 namespace "pitr-physical-backup-source-7601" deleted + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.wEZlEgSIrn namespace "psmdb-operator" deleted + cat /tmp/tmp.6jChSwrM13 + rm /tmp/tmp.wEZlEgSIrn /tmp/tmp.6jChSwrM13 + return 0 + kubectl_bin wait --for=delete namespace psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.yONnDC0TpC ++ mktemp + local LAST_ERR=/tmp/tmp.uyIUKztv1l + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete namespace psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.yONnDC0TpC + cat /tmp/tmp.uyIUKztv1l + rm /tmp/tmp.yONnDC0TpC /tmp/tmp.uyIUKztv1l + return 0 + desc 'create namespace psmdb-operator' + set +o xtrace ----------------------------------------------------------------------------------- create namespace psmdb-operator ----------------------------------------------------------------------------------- + kubectl_bin create namespace psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.UCy8Ol4uAE ++ mktemp + local LAST_ERR=/tmp/tmp.78Otv0yI86 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl create namespace psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.UCy8Ol4uAE namespace/psmdb-operator created + cat /tmp/tmp.78Otv0yI86 + rm /tmp/tmp.UCy8Ol4uAE /tmp/tmp.78Otv0yI86 + return 0 + set_kube_ctx psmdb-operator + local namespace=psmdb-operator ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.9TSwSXBxL8 +++ mktemp ++ local LAST_ERR=/tmp/tmp.zHZMw7up8l ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.9TSwSXBxL8 ++ cat /tmp/tmp.zHZMw7up8l ++ rm /tmp/tmp.9TSwSXBxL8 /tmp/tmp.zHZMw7up8l ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-2247-091270b4-5-cluster13 --namespace=psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.HIuWnWYEAa ++ mktemp + local LAST_ERR=/tmp/tmp.fAOdOh6XUr + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-2247-091270b4-5-cluster13 --namespace=psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.HIuWnWYEAa Context "gke_cloud-dev-112233_us-central1-a_jen-psmdb-2247-091270b4-5-cluster13" modified. 
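The cleanup above repeats one pattern for every CRD name found in deploy/crd.yaml: delete the CRD without waiting, clear finalizers on any leftover custom resources so deletion can finish, then block until the CRD is gone. A minimal stand-alone sketch of that pattern, assuming GNU xargs and an arbitrary 60s timeout (the test helpers use their own retry wrappers instead):

    for crd in perconaservermongodbbackups.psmdb.percona.com \
               perconaservermongodbrestores.psmdb.percona.com \
               perconaservermongodbs.psmdb.percona.com; do
        # fire-and-forget delete of the CRD itself
        kubectl delete crd "$crd" --ignore-not-found --wait=false
        # strip finalizers from any leftover objects of that kind ($0 = namespace, $1 = name)
        kubectl get "$crd" --all-namespaces -o wide 2>/dev/null | grep -v NAMESPACE \
            | xargs -L 1 sh -xc "kubectl patch $crd -n \$0 \$1 --type=merge -p '{\"metadata\":{\"finalizers\":[]}}'" || :
        # block until the CRD is actually removed
        kubectl wait --for=delete crd "$crd" --timeout=60s || :
    done

The repeated "error: the server doesn't have a resource type ..." messages are expected on a clean cluster: the previous run's CRDs are already gone, so both the listing and the fallback patch fail and the script simply moves on.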
+ cat /tmp/tmp.fAOdOh6XUr + rm /tmp/tmp.HIuWnWYEAa /tmp/tmp.fAOdOh6XUr + return 0 + deploy_operator + desc 'start PSMDB operator: perconalab/percona-server-mongodb-operator:PR-2247-091270b4' + set +o xtrace ----------------------------------------------------------------------------------- start PSMDB operator: perconalab/percona-server-mongodb-operator:PR-2247-091270b4 ----------------------------------------------------------------------------------- + local cr_file + '[' -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2247/e2e-tests/pitr-physical-backup-source/conf/crd.yaml ']' + cr_file=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2247/deploy/crd.yaml + kubectl_bin apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2247/deploy/crd.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.fmdmVIutbI ++ mktemp + local LAST_ERR=/tmp/tmp.aoM4LRdq5h + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2247/deploy/crd.yaml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.fmdmVIutbI customresourcedefinition.apiextensions.k8s.io/perconaservermongodbbackups.psmdb.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaservermongodbrestores.psmdb.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaservermongodbs.psmdb.percona.com serverside-applied + cat /tmp/tmp.aoM4LRdq5h + rm /tmp/tmp.fmdmVIutbI /tmp/tmp.aoM4LRdq5h + return 0 + '[' -n psmdb-operator ']' + apply_rbac cw-rbac + local operator_namespace=psmdb-operator + local rbac=cw-rbac + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2247/deploy/cw-rbac.yaml + sed -e 's^namespace: .*^namespace: psmdb-operator^' + kubectl_bin apply -n psmdb-operator -f - ++ mktemp + local LAST_OUT=/tmp/tmp.XtX9vIVYxN ++ mktemp + local LAST_ERR=/tmp/tmp.10NYOGObee + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -n psmdb-operator -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.XtX9vIVYxN clusterrole.rbac.authorization.k8s.io/percona-server-mongodb-operator created serviceaccount/percona-server-mongodb-operator created clusterrolebinding.rbac.authorization.k8s.io/service-account-percona-server-mongodb-operator created + cat /tmp/tmp.10NYOGObee + rm /tmp/tmp.XtX9vIVYxN /tmp/tmp.10NYOGObee + return 0 + yq eval ' (.spec.template.spec.containers[].image = "perconalab/percona-server-mongodb-operator:PR-2247-091270b4") | ((.. | select(.[] == "DISABLE_TELEMETRY")) |= .value="true") | ((.. 
| select(.[] == "LOG_LEVEL")) |= .value="DEBUG")' /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2247/deploy/cw-operator.yaml + kubectl_bin apply -n psmdb-operator -f - ++ mktemp + local LAST_OUT=/tmp/tmp.nbXZ7axSSG ++ mktemp + local LAST_ERR=/tmp/tmp.KnWQGQdhFd + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -n psmdb-operator -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.nbXZ7axSSG deployment.apps/percona-server-mongodb-operator created + cat /tmp/tmp.KnWQGQdhFd + rm /tmp/tmp.nbXZ7axSSG /tmp/tmp.KnWQGQdhFd + return 0 + sleep 20 ++ get_operator_pod ++ kubectl_bin get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.5CFb2mBP2o +++ mktemp ++ local LAST_ERR=/tmp/tmp.nu9DKRLGky ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.5CFb2mBP2o ++ cat /tmp/tmp.nu9DKRLGky ++ rm /tmp/tmp.5CFb2mBP2o /tmp/tmp.nu9DKRLGky ++ return 0 + wait_operator_pod percona-server-mongodb-operator-64bc9b5fc9-mmkzl + local pod=percona-server-mongodb-operator-64bc9b5fc9-mmkzl + set +o xtrace waiting for pod/percona-server-mongodb-operator-64bc9b5fc9-mmkzl to be ready.OK + echo 'Print operator info from log' Print operator info from log + grep 'Manager starting up' ++ get_operator_pod ++ kubectl_bin get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.m6ms9Z0McA +++ mktemp ++ local LAST_ERR=/tmp/tmp.3388liPr2M ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.m6ms9Z0McA ++ cat /tmp/tmp.3388liPr2M ++ rm /tmp/tmp.m6ms9Z0McA /tmp/tmp.3388liPr2M ++ return 0 + kubectl_bin logs -n psmdb-operator percona-server-mongodb-operator-64bc9b5fc9-mmkzl ++ mktemp + local LAST_OUT=/tmp/tmp.LSVCE19Apv ++ mktemp + local LAST_ERR=/tmp/tmp.g2CuVpnvjT + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl logs -n psmdb-operator percona-server-mongodb-operator-64bc9b5fc9-mmkzl + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.LSVCE19Apv + cat /tmp/tmp.g2CuVpnvjT + rm /tmp/tmp.LSVCE19Apv /tmp/tmp.g2CuVpnvjT + return 0 2026-02-19T14:49:06.418Z INFO setup Manager starting up {"gitCommit": "091270b48773244359d74f74a10340fc1c0cff51", "gitBranch": "PR-2247-091270b4", "buildTime": "", "goVersion": "go1.25.7", "os": "linux", "arch": "amd64"} + create_namespace pitr-physical-backup-source-22129 + local namespace=pitr-physical-backup-source-22129 + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + destroy_chaos_mesh ++ helm list --all-namespaces --filter chaos-mesh ++ tail -n1 ++ awk '-F ' '{print $2}' ++ sed s/NAMESPACE// + local chaos_mesh_ns= + desc 'destroy chaos-mesh' + set +o xtrace ----------------------------------------------------------------------------------- destroy chaos-mesh ----------------------------------------------------------------------------------- + '[' -n '' ']' ++ kubectl get MutatingWebhookConfiguration ++ 
awk '{print $1}' ++ grep chaos-mesh + timeout 30 kubectl delete MutatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep validate-auth ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl api-resources ++ grep chaos-mesh ++ awk '{print $1}' ++ awk '{print $1}' ++ kubectl get crd ++ grep chaos-mesh.org + timeout 30 kubectl delete crd error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrolebinding ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrolebinding error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrole ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrole error: resource(s) were provided, but no name was specified + : + desc 'cleaned up all old namespaces' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up all old namespaces ----------------------------------------------------------------------------------- + kubectl_bin get ns + '[' -n '' ']' + desc 'cleaned up old namespaces pitr-physical-backup-source-22129' + set +o xtrace + awk '{print$1}' ----------------------------------------------------------------------------------- ++ mktemp cleaned up old namespaces pitr-physical-backup-source-22129 ----------------------------------------------------------------------------------- + kubectl_bin delete namespace pitr-physical-backup-source-22129 --ignore-not-found + xargs kubectl delete ns + grep -E -v '^kube-|^default|Terminating|psmdb-operator|openshift|^gke-|^gmp-|^NAME' ++ mktemp + local LAST_OUT=/tmp/tmp.i4fo58jUIq ++ mktemp + local LAST_ERR=/tmp/tmp.WCwyTyfX1o + local exit_status=0 + local timeout=4 + local LAST_OUT=/tmp/tmp.m4EhR5Cvru ++ seq 0 2 ++ mktemp + local LAST_ERR=/tmp/tmp.zrgvtkQ1P9 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete namespace pitr-physical-backup-source-22129 --ignore-not-found + for i in $(seq 0 2) + set +e + kubectl get ns + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.i4fo58jUIq + cat /tmp/tmp.WCwyTyfX1o + rm /tmp/tmp.i4fo58jUIq /tmp/tmp.WCwyTyfX1o + return 0 error: resource(s) were provided, but no name was specified + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.m4EhR5Cvru + cat /tmp/tmp.zrgvtkQ1P9 + rm /tmp/tmp.m4EhR5Cvru /tmp/tmp.zrgvtkQ1P9 + return 0 + kubectl_bin wait --for=delete namespace pitr-physical-backup-source-22129 ++ mktemp + local LAST_OUT=/tmp/tmp.l8emnvzgSY ++ mktemp + local LAST_ERR=/tmp/tmp.ZPNKTirCYJ + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete namespace pitr-physical-backup-source-22129 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.l8emnvzgSY + cat /tmp/tmp.ZPNKTirCYJ + rm /tmp/tmp.l8emnvzgSY /tmp/tmp.ZPNKTirCYJ + return 0 + desc 'create namespace pitr-physical-backup-source-22129' + set +o xtrace ----------------------------------------------------------------------------------- create namespace pitr-physical-backup-source-22129 
----------------------------------------------------------------------------------- + kubectl_bin create namespace pitr-physical-backup-source-22129 ++ mktemp + local LAST_OUT=/tmp/tmp.VV2sjXbCG0 ++ mktemp + local LAST_ERR=/tmp/tmp.3KqgC0rwbj + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl create namespace pitr-physical-backup-source-22129 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.VV2sjXbCG0 namespace/pitr-physical-backup-source-22129 created + cat /tmp/tmp.3KqgC0rwbj + rm /tmp/tmp.VV2sjXbCG0 /tmp/tmp.3KqgC0rwbj + return 0 + set_kube_ctx pitr-physical-backup-source-22129 + local namespace=pitr-physical-backup-source-22129 ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.A6ZaKd7QCU +++ mktemp ++ local LAST_ERR=/tmp/tmp.BGwDRbnqFc ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.A6ZaKd7QCU ++ cat /tmp/tmp.BGwDRbnqFc ++ rm /tmp/tmp.A6ZaKd7QCU /tmp/tmp.BGwDRbnqFc ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-2247-091270b4-5-cluster13 --namespace=pitr-physical-backup-source-22129 ++ mktemp + local LAST_OUT=/tmp/tmp.QqPvyNJDQL ++ mktemp + local LAST_ERR=/tmp/tmp.sxp7EPCrwC + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-2247-091270b4-5-cluster13 --namespace=pitr-physical-backup-source-22129 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.QqPvyNJDQL Context "gke_cloud-dev-112233_us-central1-a_jen-psmdb-2247-091270b4-5-cluster13" modified. 
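At this point the operator is running and both namespaces exist. deploy_operator, traced in full above, reduces to three kubectl applies plus a readiness wait; a condensed restatement (paths, image tag and the yq expression are taken from the trace, while the explicit wait command is an assumption, since the test uses its own wait_operator_pod helper):

    # CRDs are applied server-side so repeated runs don't conflict on managed fields
    kubectl apply --server-side --force-conflicts -f deploy/crd.yaml
    # cluster-wide RBAC, rewritten to point at the operator namespace
    sed -e 's^namespace: .*^namespace: psmdb-operator^' deploy/cw-rbac.yaml \
        | kubectl apply -n psmdb-operator -f -
    # operator deployment with the PR image, telemetry disabled and debug logging
    yq eval '(.spec.template.spec.containers[].image = "perconalab/percona-server-mongodb-operator:PR-2247-091270b4")
        | ((.. | select(.[] == "DISABLE_TELEMETRY")) |= .value="true")
        | ((.. | select(.[] == "LOG_LEVEL")) |= .value="DEBUG")' deploy/cw-operator.yaml \
        | kubectl apply -n psmdb-operator -f -
    kubectl -n psmdb-operator wait --for=condition=Ready pod \
        -l name=percona-server-mongodb-operator --timeout=120s

The "Manager starting up" line pulled from the operator log confirms the expected commit and branch (PR-2247-091270b4) before the test namespace is prepared.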
+ cat /tmp/tmp.sxp7EPCrwC + rm /tmp/tmp.QqPvyNJDQL /tmp/tmp.sxp7EPCrwC + return 0 + deploy_minio + local cert_secret= + local service_name=minio-service + desc 'install MinIO: minio-service' + set +o xtrace ----------------------------------------------------------------------------------- install MinIO: minio-service ----------------------------------------------------------------------------------- + helm uninstall minio-service + : + helm repo remove minio + : + helm repo add minio https://charts.min.io/ "minio" already exists with the same configuration, skipping + local endpoint=http://minio-service:9000 + minio_args=('--version' '5.4.0' '--set' 'replicas=1' '--set' 'mode=standalone' '--set' 'resources.requests.memory=256Mi' '--set' 'rootUser=rootuser' '--set' 'rootPassword=rootpass123' '--set' 'users[0].accessKey=some-access-key' '--set' 'users[0].secretKey=some-secret-key' '--set' 'users[0].policy=consoleAdmin' '--set' 'service.type=ClusterIP' '--set' 'configPathmc=/tmp/' '--set' 'securityContext.enabled=false' '--set' 'persistence.size=2G' '--set' 'fullnameOverride=minio-service' '--set' 'serviceAccount.create=true' '--set' 'serviceAccount.name=minio-service-sa') + local minio_args + [[ -n '' ]] + retry 10 60 helm install minio-service --version 5.4.0 --set replicas=1 --set mode=standalone --set resources.requests.memory=256Mi --set rootUser=rootuser --set rootPassword=rootpass123 --set 'users[0].accessKey=some-access-key' --set 'users[0].secretKey=some-secret-key' --set 'users[0].policy=consoleAdmin' --set service.type=ClusterIP --set configPathmc=/tmp/ --set securityContext.enabled=false --set persistence.size=2G --set fullnameOverride=minio-service --set serviceAccount.create=true --set serviceAccount.name=minio-service-sa minio/minio + local max=10 + local delay=60 + shift 2 + local n=1 + helm install minio-service --version 5.4.0 --set replicas=1 --set mode=standalone --set resources.requests.memory=256Mi --set rootUser=rootuser --set rootPassword=rootpass123 --set 'users[0].accessKey=some-access-key' --set 'users[0].secretKey=some-secret-key' --set 'users[0].policy=consoleAdmin' --set service.type=ClusterIP --set configPathmc=/tmp/ --set securityContext.enabled=false --set persistence.size=2G --set fullnameOverride=minio-service --set serviceAccount.create=true --set serviceAccount.name=minio-service-sa minio/minio NAME: minio-service LAST DEPLOYED: Thu Feb 19 14:49:46 2026 NAMESPACE: pitr-physical-backup-source-22129 STATUS: deployed REVISION: 1 TEST SUITE: None NOTES: MinIO can be accessed via port 9000 on the following DNS name from within your cluster: minio-service.pitr-physical-backup-source-22129.cluster.local To access MinIO from localhost, run the below commands: 1. export POD_NAME=$(kubectl get pods --namespace pitr-physical-backup-source-22129 -l "release=minio-service" -o jsonpath="{.items[0].metadata.name}") 2. kubectl port-forward $POD_NAME 9000 --namespace pitr-physical-backup-source-22129 Read more about port forwarding here: http://kubernetes.io/docs/user-guide/kubectl/kubectl_port-forward/ You can now access MinIO server on http://localhost:9000. Follow the below steps to connect to MinIO server with mc client: 1. Download the MinIO mc client - https://min.io/docs/minio/linux/reference/minio-mc.html#quickstart 2. 
export MC_HOST_minio-service-local=http://$(kubectl get secret --namespace pitr-physical-backup-source-22129 minio-service -o jsonpath="{.data.rootUser}" | base64 --decode):$(kubectl get secret --namespace pitr-physical-backup-source-22129 minio-service -o jsonpath="{.data.rootPassword}" | base64 --decode)@localhost:9000 3. mc ls minio-service-local ++ kubectl_bin get pods --selector=release=minio-service -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.PpiqKhXwHg +++ mktemp ++ local LAST_ERR=/tmp/tmp.VwsD3077Xi ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=release=minio-service -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.PpiqKhXwHg ++ cat /tmp/tmp.VwsD3077Xi ++ rm /tmp/tmp.PpiqKhXwHg /tmp/tmp.VwsD3077Xi ++ return 0 + local MINIO_POD=minio-service-6d5f646cdc-q96vx + wait_pod minio-service-6d5f646cdc-q96vx + local pod=minio-service-6d5f646cdc-q96vx + set +o xtrace waiting for pod/minio-service-6d5f646cdc-q96vx to be ready.OK + '[' -n psmdb-operator ']' + kubectl_bin create svc -n psmdb-operator externalname minio-service --external-name=minio-service.pitr-physical-backup-source-22129.svc.cluster.local --tcp=9000 service/minio-service created + create_minio_bucket operator-testing http://minio-service:9000 + local bucket=operator-testing + local endpoint=http://minio-service:9000 + kubectl_bin run -i --rm aws-cli --image=perconalab/awscli --restart=Never -- bash -c 'AWS_ACCESS_KEY_ID=some-access-key AWS_SECRET_ACCESS_KEY=some-secret-key AWS_DEFAULT_REGION=us-east-1 /usr/bin/aws --no-verify-ssl --endpoint-url http://minio-service:9000 s3 mb s3://operator-testing' ++ mktemp + local LAST_OUT=/tmp/tmp.zLuqzXMUdE ++ mktemp + local LAST_ERR=/tmp/tmp.KqMIzGYtAu + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl run -i --rm aws-cli --image=perconalab/awscli --restart=Never -- bash -c 'AWS_ACCESS_KEY_ID=some-access-key AWS_SECRET_ACCESS_KEY=some-secret-key AWS_DEFAULT_REGION=us-east-1 /usr/bin/aws --no-verify-ssl --endpoint-url http://minio-service:9000 s3 mb s3://operator-testing' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.zLuqzXMUdE make_bucket: operator-testing pod "aws-cli" deleted from pitr-physical-backup-source-22129 namespace + cat /tmp/tmp.KqMIzGYtAu All commands and output from this session will be recorded in container logs, including credentials and sensitive information passed through the command prompt. If you don't see a command prompt, try pressing enter. 
warning: couldn't attach to pod/aws-cli, falling back to streaming logs: Internal error occurred: Internal error occurred: error attaching to container: container is in CONTAINER_EXITED state + rm /tmp/tmp.zLuqzXMUdE /tmp/tmp.KqMIzGYtAu + return 0 + desc 'create secrets and start client' + set +o xtrace ----------------------------------------------------------------------------------- create secrets and start client ----------------------------------------------------------------------------------- + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2247/e2e-tests/conf/secrets.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2247/e2e-tests/conf/client.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2247/e2e-tests/conf/minio-secret.yml ++ mktemp + local LAST_OUT=/tmp/tmp.SNPCGeZABT ++ mktemp + local LAST_ERR=/tmp/tmp.IkaoAZDebe + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2247/e2e-tests/conf/secrets.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2247/e2e-tests/conf/client.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2247/e2e-tests/conf/minio-secret.yml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.SNPCGeZABT secret/some-users created deployment.apps/psmdb-client created secret/minio-secret created + cat /tmp/tmp.IkaoAZDebe + rm /tmp/tmp.SNPCGeZABT /tmp/tmp.IkaoAZDebe + return 0 + cluster=some-name + desc 'create first PSMDB cluster some-name' + set +o xtrace ----------------------------------------------------------------------------------- create first PSMDB cluster some-name ----------------------------------------------------------------------------------- + apply_cluster /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2247/e2e-tests/pitr-physical-backup-source/conf/some-name-rs0.yml + '[' -z '' ']' + cat_config /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2247/e2e-tests/pitr-physical-backup-source/conf/some-name-rs0.yml + kubectl_bin apply -f - + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2247/e2e-tests/pitr-physical-backup-source/conf/some-name-rs0.yml + yq eval '(.spec | select(.image == null)).image = "perconalab/percona-server-mongodb-operator:main-mongod8.0"' + yq eval '(.spec | select(has("pmm"))).pmm.image = "percona/pmm-client:2.44.1-1"' + yq eval '(.spec | select(has("initImage"))).initImage = "perconalab/percona-server-mongodb-operator:PR-2247-091270b4"' + yq eval '(.spec | select(has("backup"))).backup.image = "perconalab/percona-server-mongodb-operator:main-backup"' ++ mktemp + /usr/sbin/sed -e s/NAME_SPACE/pitr-physical-backup-source-22129/g + yq eval '.spec.upgradeOptions.apply="Never"' + local LAST_OUT=/tmp/tmp.t3byVHvOMy ++ mktemp + local LAST_ERR=/tmp/tmp.ESTVtapcvu + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.t3byVHvOMy perconaservermongodb.psmdb.percona.com/some-name created + cat /tmp/tmp.ESTVtapcvu + rm /tmp/tmp.t3byVHvOMy /tmp/tmp.ESTVtapcvu + return 0 + desc 'check if all 3 Pods started' + set +o xtrace ----------------------------------------------------------------------------------- check if all 3 Pods started ----------------------------------------------------------------------------------- + wait_for_running some-name-rs0 3 + local name=some-name-rs0 + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o 
xtrace + local rs_name=rs0 + local cluster_name=some-name ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod some-name-rs0-0 + local pod=some-name-rs0-0 + set +o xtrace waiting for pod/some-name-rs0-0 to be ready.......OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod some-name-rs0-1 + local pod=some-name-rs0-1 + set +o xtrace waiting for pod/some-name-rs0-1 to be ready......OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.9XymfwXaU9 +++ mktemp ++ local LAST_ERR=/tmp/tmp.G5gJvJKMaX ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.9XymfwXaU9 ++ cat /tmp/tmp.G5gJvJKMaX ++ rm /tmp/tmp.9XymfwXaU9 /tmp/tmp.G5gJvJKMaX ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-rs0-2 + local pod=some-name-rs0-2 + set +o xtrace waiting for pod/some-name-rs0-2 to be ready......OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.oXMqH9dpCE +++ mktemp ++ local LAST_ERR=/tmp/tmp.qk5pjhoBHq ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.oXMqH9dpCE ++ cat /tmp/tmp.qk5pjhoBHq ++ rm /tmp/tmp.oXMqH9dpCE /tmp/tmp.qk5pjhoBHq ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.pZOb7NREEb +++ mktemp ++ local LAST_ERR=/tmp/tmp.ZeK7KKoBnh ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.pZOb7NREEb ++ cat /tmp/tmp.ZeK7KKoBnh ++ rm /tmp/tmp.pZOb7NREEb /tmp/tmp.ZeK7KKoBnh ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness......................................................... 
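wait_for_running polls each rs0 pod individually, checks whether arbiter, non-voting and hidden members are enabled in the CR (none are here, so the jsonpath queries return empty), and finally loops until the cluster reports ready. A rough plain-kubectl equivalent, assuming the operator exposes the overall state in .status.state (field name and the "ready" value are assumptions, not taken from this log):

    # wait for the three rs0 data pods
    for i in 0 1 2; do
        kubectl wait --for=condition=Ready pod "some-name-rs0-$i" --timeout=300s
    done
    # then wait for the custom resource itself to be reconciled to a ready state
    kubectl wait --for=jsonpath='{.status.state}'=ready psmdb/some-name --timeout=600s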
+ wait_for_running some-name-cfg 3 false + local name=some-name-cfg + let last_pod=2 + local check_cluster_readyness=false + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=cfg + local cluster_name=some-name ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod some-name-cfg-0 + local pod=some-name-cfg-0 + set +o xtrace waiting for pod/some-name-cfg-0 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod some-name-cfg-1 + local pod=some-name-cfg-1 + set +o xtrace waiting for pod/some-name-cfg-1 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.9OPp3Kw6Ws +++ mktemp ++ local LAST_ERR=/tmp/tmp.39TFJwsS4c ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.9OPp3Kw6Ws ++ cat /tmp/tmp.39TFJwsS4c ++ rm /tmp/tmp.9OPp3Kw6Ws /tmp/tmp.39TFJwsS4c ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-cfg-2 + local pod=some-name-cfg-2 + set +o xtrace waiting for pod/some-name-cfg-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.mcRqx9I9Ky +++ mktemp ++ local LAST_ERR=/tmp/tmp.WEUCumCsrN ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.mcRqx9I9Ky ++ cat /tmp/tmp.WEUCumCsrN ++ rm /tmp/tmp.mcRqx9I9Ky /tmp/tmp.WEUCumCsrN ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.4g1u1fVxzt +++ mktemp ++ local LAST_ERR=/tmp/tmp.moshlAT7pY ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.4g1u1fVxzt ++ cat /tmp/tmp.moshlAT7pY ++ rm /tmp/tmp.4g1u1fVxzt /tmp/tmp.moshlAT7pY ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ false == \t\r\u\e ]] + sleep 10 + write_initial_data + desc 'create user myApp' + set +o xtrace ----------------------------------------------------------------------------------- create user myApp ----------------------------------------------------------------------------------- + run_mongos 'db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})' userAdmin:userAdmin123456@some-name-mongos.pitr-physical-backup-source-22129 + local 'command=db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})' + local uri=userAdmin:userAdmin123456@some-name-mongos.pitr-physical-backup-source-22129 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local port=27017 + local mongo_bin=mongo ++ echo .svc.cluster.local ++ awk -F: '{print $2}' + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.15d7KkWg6n +++ mktemp ++ local LAST_ERR=/tmp/tmp.yWL8o1Tw9d ++ local 
exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.15d7KkWg6n ++ cat /tmp/tmp.yWL8o1Tw9d ++ rm /tmp/tmp.15d7KkWg6n /tmp/tmp.yWL8o1Tw9d ++ return 0 + local client_container=psmdb-client-86cb5d8484-qqm2k + kubectl_bin exec psmdb-client-86cb5d8484-qqm2k -- bash -c 'printf '\''db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})\n'\'' | mongo mongodb://userAdmin:userAdmin123456@some-name-mongos.pitr-physical-backup-source-22129.svc.cluster.local:27017/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.SRQjXADu3y ++ mktemp + local LAST_ERR=/tmp/tmp.Dds9h7GfXt + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-86cb5d8484-qqm2k -- bash -c 'printf '\''db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})\n'\'' | mongo mongodb://userAdmin:userAdmin123456@some-name-mongos.pitr-physical-backup-source-22129.svc.cluster.local:27017/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.SRQjXADu3y Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-mongos.pitr-physical-backup-source-22129.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb Implicit session: session { "id" : UUID("48fa492d-82f8-4745-a779-88bde555a721") } Percona Server for MongoDB server version: v8.0.19-7 WARNING: shell and server versions do not match Successfully added user: { "user" : "myApp", "roles" : [ { "db" : "myApp", "role" : "readWrite" } ] } bye + cat /tmp/tmp.Dds9h7GfXt + rm /tmp/tmp.SRQjXADu3y /tmp/tmp.Dds9h7GfXt + return 0 + sleep 2 + write_document + local cmp_postfix= + local sleep_value=0 + log 'write initial data, read from all' + set +o xtrace [2026-02-19T14:54:22+0000] write initial data, read from all + run_mongos 'use myApp\n db.test.insert({ x: 100500 })' myApp:myPass@some-name-mongos.pitr-physical-backup-source-22129 + local 'command=use myApp\n db.test.insert({ x: 100500 })' + local uri=myApp:myPass@some-name-mongos.pitr-physical-backup-source-22129 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local port=27017 + local mongo_bin=mongo ++ echo .svc.cluster.local ++ awk -F: '{print $2}' + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.yDW4kABHNB +++ mktemp ++ local LAST_ERR=/tmp/tmp.ls5LuogC29 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.yDW4kABHNB ++ cat /tmp/tmp.ls5LuogC29 ++ rm /tmp/tmp.yDW4kABHNB /tmp/tmp.ls5LuogC29 ++ return 0 + local client_container=psmdb-client-86cb5d8484-qqm2k + kubectl_bin exec psmdb-client-86cb5d8484-qqm2k -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.pitr-physical-backup-source-22129.svc.cluster.local:27017/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.EOcz4ozoQq ++ mktemp + local LAST_ERR=/tmp/tmp.clZc2d9P1c + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-86cb5d8484-qqm2k -- bash -c 'printf 
'\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.pitr-physical-backup-source-22129.svc.cluster.local:27017/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.EOcz4ozoQq Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-mongos.pitr-physical-backup-source-22129.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb Implicit session: session { "id" : UUID("668f567b-44cd-4431-99b4-46e076debfe0") } Percona Server for MongoDB server version: v8.0.19-7 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.clZc2d9P1c + rm /tmp/tmp.EOcz4ozoQq /tmp/tmp.clZc2d9P1c + return 0 + sleep 0 + compare_mongos_cmd find myApp:myPass@some-name-mongos.pitr-physical-backup-source-22129 + local command=find + local uri=myApp:myPass@some-name-mongos.pitr-physical-backup-source-22129 + local postfix= + local suffix= + local database=myApp + local collection=test + local port=27017 + local tls=false + [[ false == \t\r\u\e ]] + mongos_command=run_mongos + log 'running db.test.command() in myApp' + set +o xtrace [2026-02-19T14:54:25+0000] running db.test.command() in myApp + run_mongos 'use myApp\n db.test.find()' myApp:myPass@some-name-mongos.pitr-physical-backup-source-22129 mongodb '' '' 27017 + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-mongos.pitr-physical-backup-source-22129 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + grep -E -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' + local port=27017 + local mongo_bin=mongo + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ echo .svc.cluster.local ++ awk -F: '{print $2}' + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.GxBqXt26i2 +++ mktemp ++ local LAST_ERR=/tmp/tmp.TlsLmeFo9E ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.GxBqXt26i2 ++ cat /tmp/tmp.TlsLmeFo9E ++ rm /tmp/tmp.GxBqXt26i2 /tmp/tmp.TlsLmeFo9E ++ return 0 + local client_container=psmdb-client-86cb5d8484-qqm2k + kubectl_bin exec psmdb-client-86cb5d8484-qqm2k -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.pitr-physical-backup-source-22129.svc.cluster.local:27017/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.uITTWLyxI9 ++ mktemp + local LAST_ERR=/tmp/tmp.YVO8wr56ZJ + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-86cb5d8484-qqm2k -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.pitr-physical-backup-source-22129.svc.cluster.local:27017/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.uITTWLyxI9 + cat /tmp/tmp.YVO8wr56ZJ + rm /tmp/tmp.uITTWLyxI9 /tmp/tmp.YVO8wr56ZJ + return 0 + [[ 0 -eq 0 ]] + diff 
/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2247/e2e-tests/pitr-physical-backup-source/compare/find.json /tmp/tmp.ODbWEGhaNi/find + wait_backup_agent some-name-rs0-0 + local agent_pod=some-name-rs0-0 + set +o xtrace waiting for pbm-agent to be ready in some-name-rs0-0...2026-02-19T14:53:33.000+0000 I listening for the commands + wait_backup_agent some-name-rs0-1 + local agent_pod=some-name-rs0-1 + set +o xtrace waiting for pbm-agent to be ready in some-name-rs0-1...2026-02-19T14:53:32.000+0000 I listening for the commands + wait_backup_agent some-name-rs0-2 + local agent_pod=some-name-rs0-2 + set +o xtrace waiting for pbm-agent to be ready in some-name-rs0-2...2026-02-19T14:53:45.000+0000 I listening for the commands + wait_backup_agent some-name-rs1-0 + local agent_pod=some-name-rs1-0 + set +o xtrace waiting for pbm-agent to be ready in some-name-rs1-0...2026-02-19T14:53:35.000+0000 I listening for the commands + wait_backup_agent some-name-rs1-1 + local agent_pod=some-name-rs1-1 + set +o xtrace waiting for pbm-agent to be ready in some-name-rs1-1...2026-02-19T14:53:33.000+0000 I listening for the commands + wait_backup_agent some-name-rs1-2 + local agent_pod=some-name-rs1-2 + set +o xtrace waiting for pbm-agent to be ready in some-name-rs1-2...2026-02-19T14:53:46.000+0000 I listening for the commands + wait_backup_agent some-name-rs2-0 + local agent_pod=some-name-rs2-0 + set +o xtrace waiting for pbm-agent to be ready in some-name-rs2-0...2026-02-19T14:53:36.000+0000 I listening for the commands + wait_backup_agent some-name-rs2-1 + local agent_pod=some-name-rs2-1 + set +o xtrace waiting for pbm-agent to be ready in some-name-rs2-1...2026-02-19T14:53:35.000+0000 I listening for the commands + wait_backup_agent some-name-rs2-2 + local agent_pod=some-name-rs2-2 + set +o xtrace waiting for pbm-agent to be ready in some-name-rs2-2...2026-02-19T14:53:46.000+0000 I listening for the commands + echo 'Sleeping for 360 seconds' Sleeping for 360 seconds + sleep 360 + backup_name_minio=backup-minio + desc 'restore pitr type date using backupSource' + set +o xtrace ----------------------------------------------------------------------------------- restore pitr type date using backupSource ----------------------------------------------------------------------------------- + run_backup backup-minio 1 physical + local name=backup-minio + local idx=1 + local type=physical + desc 'run backup backup-minio-1' + set +o xtrace ----------------------------------------------------------------------------------- run backup backup-minio-1 ----------------------------------------------------------------------------------- + /usr/sbin/sed -e 's/name:/name: backup-minio-1/' + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2247/e2e-tests/pitr-physical-backup-source/conf/backup-minio.yml + /usr/sbin/sed -e 's/type:/type: physical/' + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.UFgAfTkV8g ++ mktemp + local LAST_ERR=/tmp/tmp.fauHaSRWBO + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.UFgAfTkV8g perconaservermongodbbackup.psmdb.percona.com/backup-minio-1 created + cat /tmp/tmp.fauHaSRWBO + rm /tmp/tmp.UFgAfTkV8g /tmp/tmp.fauHaSRWBO + return 0 + wait_backup backup-minio-1 + local backup_name=backup-minio-1 + local target_state=ready + set +o xtrace waiting for backup-minio-1 to reach ready state........OK + compare_latest_restorable_time some-name-rs0 backup-minio-1 + 
local cluster=some-name-rs0 + local backup_name=backup-minio-1 + local latest_restorable_time + local backup_time ++ get_latest_restorable_time some-name-rs0 ++ local cluster=some-name-rs0 ++ local first_timestamp ++ local second_timestamp ++ local retry=0 ++ [[ '' != '' ]] +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.jGDpUb4viQ ++++ mktemp +++ local LAST_ERR=/tmp/tmp.jDHRLoxG6o +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.jGDpUb4viQ +++ cat /tmp/tmp.jDHRLoxG6o +++ rm /tmp/tmp.jGDpUb4viQ /tmp/tmp.jDHRLoxG6o +++ return 0 ++ first_timestamp=null ++ sleep 5 ++ [[ null != '' ]] ++ [[ null != \n\u\l\l ]] ++ let retry+=1 ++ [[ 1 -gt 30 ]] ++ [[ null != '' ]] ++ [[ null != \n\u\l\l ]] +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.8NlMOdreKc ++++ mktemp +++ local LAST_ERR=/tmp/tmp.qlHAkYg2CS +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.8NlMOdreKc +++ cat /tmp/tmp.qlHAkYg2CS +++ rm /tmp/tmp.8NlMOdreKc /tmp/tmp.qlHAkYg2CS +++ return 0 ++ first_timestamp=null ++ sleep 5 ++ [[ null != '' ]] ++ [[ null != \n\u\l\l ]] ++ let retry+=1 ++ [[ 2 -gt 30 ]] ++ [[ null != '' ]] ++ [[ null != \n\u\l\l ]] +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.biJ2LNOba5 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.F8DAIx1Gm8 +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.biJ2LNOba5 +++ cat /tmp/tmp.F8DAIx1Gm8 +++ rm /tmp/tmp.biJ2LNOba5 /tmp/tmp.F8DAIx1Gm8 +++ return 0 ++ first_timestamp=null ++ sleep 5 ++ [[ null != '' ]] ++ [[ null != \n\u\l\l ]] ++ let retry+=1 ++ [[ 3 -gt 30 ]] ++ [[ null != '' ]] ++ [[ null != \n\u\l\l ]] +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.t6ujpDCiWX ++++ mktemp +++ local LAST_ERR=/tmp/tmp.74Cxr2EQdp +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.t6ujpDCiWX +++ cat /tmp/tmp.74Cxr2EQdp +++ rm /tmp/tmp.t6ujpDCiWX /tmp/tmp.74Cxr2EQdp +++ return 0 ++ first_timestamp=1771513264 ++ sleep 5 ++ [[ 1771513264 != '' ]] ++ [[ 1771513264 != \n\u\l\l ]] +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.Wl0r8emZE3 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.aHx6sLCzT7 +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec some-name-rs0-0 
-c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.Wl0r8emZE3 +++ cat /tmp/tmp.aHx6sLCzT7 +++ rm /tmp/tmp.Wl0r8emZE3 /tmp/tmp.aHx6sLCzT7 +++ return 0 ++ second_timestamp=1771513264 ++ let retry+=1 ++ [[ 4 -gt 30 ]] ++ [[ 1771513264 != '' ]] ++ [[ 1771513264 != \n\u\l\l ]] ++ [[ 1771513264 == 1771513264 ]] ++ /usr/sbin/date -u -d @1771513264 +%Y-%m-%dT%H:%M:%SZ + latest_restorable_time=2026-02-19T15:01:04Z ++ get_latest_restorable_time_from_backup_object backup-minio-1 ++ local backup_name=backup-minio-1 ++ local latestRestorableTime ++ local retry=0 ++ [[ '' != '' ]] ++ sleep 5 +++ kubectl_bin get psmdb-backup backup-minio-1 -o 'jsonpath={.status.latestRestorableTime}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.A8tX3tixvt ++++ mktemp +++ local LAST_ERR=/tmp/tmp.SkhKj8l3Yr +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get psmdb-backup backup-minio-1 -o 'jsonpath={.status.latestRestorableTime}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.A8tX3tixvt +++ cat /tmp/tmp.SkhKj8l3Yr +++ rm /tmp/tmp.A8tX3tixvt /tmp/tmp.SkhKj8l3Yr +++ return 0 ++ latestRestorableTime=2026-02-19T15:01:04Z ++ let retry+=1 ++ [[ 1 -gt 30 ]] ++ [[ 2026-02-19T15:01:04Z != '' ]] ++ [[ 2026-02-19T15:01:04Z != \n\u\l\l ]] ++ echo 2026-02-19T15:01:04Z + backup_time=2026-02-19T15:01:04Z + [[ 2026-02-19T15:01:04Z != \2\0\2\6\-\0\2\-\1\9\T\1\5\:\0\1\:\0\4\Z ]] + reset_collection + desc 'reset data' + set +o xtrace ----------------------------------------------------------------------------------- reset data ----------------------------------------------------------------------------------- + run_mongos 'use myApp\n db.test.remove({})' myApp:myPass@some-name-mongos.pitr-physical-backup-source-22129 + local 'command=use myApp\n db.test.remove({})' + local uri=myApp:myPass@some-name-mongos.pitr-physical-backup-source-22129 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local port=27017 + local mongo_bin=mongo ++ echo .svc.cluster.local ++ awk -F: '{print $2}' + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.M5G5ENvWh0 +++ mktemp ++ local LAST_ERR=/tmp/tmp.CZNIE9hOxn ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.M5G5ENvWh0 ++ cat /tmp/tmp.CZNIE9hOxn ++ rm /tmp/tmp.M5G5ENvWh0 /tmp/tmp.CZNIE9hOxn ++ return 0 + local client_container=psmdb-client-86cb5d8484-qqm2k + kubectl_bin exec psmdb-client-86cb5d8484-qqm2k -- bash -c 'printf '\''use myApp\n db.test.remove({})\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.pitr-physical-backup-source-22129.svc.cluster.local:27017/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.yjFUy3Z98M ++ mktemp + local LAST_ERR=/tmp/tmp.oIlinzsty3 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-86cb5d8484-qqm2k -- bash -c 'printf '\''use myApp\n db.test.remove({})\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.pitr-physical-backup-source-22129.svc.cluster.local:27017/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.yjFUy3Z98M Percona Server for MongoDB shell version 
v4.4.29-28 connecting to: mongodb://some-name-mongos.pitr-physical-backup-source-22129.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb Implicit session: session { "id" : UUID("4fa1de4b-0062-4b87-88b5-e4f9581f89c8") } Percona Server for MongoDB server version: v8.0.19-7 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nRemoved" : 1 }) bye + cat /tmp/tmp.oIlinzsty3 + rm /tmp/tmp.yjFUy3Z98M /tmp/tmp.oIlinzsty3 + return 0 + sleep 2 + write_document '' 120 + local cmp_postfix= + local sleep_value=120 + log 'write initial data, read from all' + set +o xtrace [2026-02-19T15:01:34+0000] write initial data, read from all + run_mongos 'use myApp\n db.test.insert({ x: 100500 })' myApp:myPass@some-name-mongos.pitr-physical-backup-source-22129 + local 'command=use myApp\n db.test.insert({ x: 100500 })' + local uri=myApp:myPass@some-name-mongos.pitr-physical-backup-source-22129 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local port=27017 + local mongo_bin=mongo ++ echo .svc.cluster.local ++ awk -F: '{print $2}' + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.jYWICn4kCL +++ mktemp ++ local LAST_ERR=/tmp/tmp.Pfm0xxSbEq ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.jYWICn4kCL ++ cat /tmp/tmp.Pfm0xxSbEq ++ rm /tmp/tmp.jYWICn4kCL /tmp/tmp.Pfm0xxSbEq ++ return 0 + local client_container=psmdb-client-86cb5d8484-qqm2k + kubectl_bin exec psmdb-client-86cb5d8484-qqm2k -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.pitr-physical-backup-source-22129.svc.cluster.local:27017/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.P4bZTnHyOy ++ mktemp + local LAST_ERR=/tmp/tmp.zaIsxQGDMr + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-86cb5d8484-qqm2k -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.pitr-physical-backup-source-22129.svc.cluster.local:27017/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.P4bZTnHyOy Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-mongos.pitr-physical-backup-source-22129.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb Implicit session: session { "id" : UUID("014404d0-7ac6-4fc1-a61c-e5029ed3e176") } Percona Server for MongoDB server version: v8.0.19-7 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.zaIsxQGDMr + rm /tmp/tmp.P4bZTnHyOy /tmp/tmp.zaIsxQGDMr + return 0 + sleep 120 + compare_mongos_cmd find myApp:myPass@some-name-mongos.pitr-physical-backup-source-22129 + local command=find + local uri=myApp:myPass@some-name-mongos.pitr-physical-backup-source-22129 + local postfix= + local suffix= + local database=myApp + local collection=test + local port=27017 + local tls=false + [[ false == \t\r\u\e ]] + mongos_command=run_mongos + log 'running db.test.command() in myApp' + set +o xtrace [2026-02-19T15:03:36+0000] running db.test.command() in myApp + run_mongos 'use myApp\n 
db.test.find()' myApp:myPass@some-name-mongos.pitr-physical-backup-source-22129 mongodb '' '' 27017 + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-mongos.pitr-physical-backup-source-22129 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local port=27017 + grep -E -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' + local mongo_bin=mongo + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ echo .svc.cluster.local ++ awk -F: '{print $2}' + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.31FyTTtq1s +++ mktemp ++ local LAST_ERR=/tmp/tmp.V1ybWR5dRU ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.31FyTTtq1s ++ cat /tmp/tmp.V1ybWR5dRU ++ rm /tmp/tmp.31FyTTtq1s /tmp/tmp.V1ybWR5dRU ++ return 0 + local client_container=psmdb-client-86cb5d8484-qqm2k + kubectl_bin exec psmdb-client-86cb5d8484-qqm2k -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.pitr-physical-backup-source-22129.svc.cluster.local:27017/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.JhraknTya7 ++ mktemp + local LAST_ERR=/tmp/tmp.5GRoIxjZiJ + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-86cb5d8484-qqm2k -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.pitr-physical-backup-source-22129.svc.cluster.local:27017/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.JhraknTya7 + cat /tmp/tmp.5GRoIxjZiJ + rm /tmp/tmp.JhraknTya7 /tmp/tmp.5GRoIxjZiJ + return 0 + [[ 0 -eq 0 ]] + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2247/e2e-tests/pitr-physical-backup-source/compare/find.json /tmp/tmp.ODbWEGhaNi/find ++ run_mongos 'new Date().getTime() / 1000' myApp:myPass@some-name-mongos.pitr-physical-backup-source-22129 mongodb '' --quiet ++ local 'command=new Date().getTime() / 1000' ++ local uri=myApp:myPass@some-name-mongos.pitr-physical-backup-source-22129 ++ local driver=mongodb ++ grep -E -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match' ++ local suffix=.svc.cluster.local ++ local mongo_flag=--quiet ++ local port=27017 ++ local mongo_bin=mongo ++ cut -d. 
-f1 +++ awk -F: '{print $2}' +++ echo .svc.cluster.local ++ suffix_port= ++ [[ -z '' ]] ++ suffix=.svc.cluster.local:27017 +++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.8x9J22O3az ++++ mktemp +++ local LAST_ERR=/tmp/tmp.KK9WnvV7ih +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.8x9J22O3az +++ cat /tmp/tmp.KK9WnvV7ih +++ rm /tmp/tmp.8x9J22O3az /tmp/tmp.KK9WnvV7ih +++ return 0 ++ local client_container=psmdb-client-86cb5d8484-qqm2k ++ kubectl_bin exec psmdb-client-86cb5d8484-qqm2k -- bash -c 'printf '\''new Date().getTime() / 1000\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.pitr-physical-backup-source-22129.svc.cluster.local:27017/admin --quiet' +++ mktemp ++ local LAST_OUT=/tmp/tmp.wZzDBkmU6e +++ mktemp ++ local LAST_ERR=/tmp/tmp.g06JiXlpfK ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl exec psmdb-client-86cb5d8484-qqm2k -- bash -c 'printf '\''new Date().getTime() / 1000\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.pitr-physical-backup-source-22129.svc.cluster.local:27017/admin --quiet' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.wZzDBkmU6e ++ cat /tmp/tmp.g06JiXlpfK ++ rm /tmp/tmp.wZzDBkmU6e /tmp/tmp.g06JiXlpfK ++ return 0 + time_now=1771513421 + check_recovery backup-minio-1 date 1771513421 '' some-name backupSource + local backup_name=backup-minio-1 + local restore_type=date + local restore_date=1771513421 + local cmp_postfix= + local cluster_name=some-name + local backupSource=backupSource ++ get_latest_oplog_chunk_ts some-name ++ local cluster=some-name +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json ++++ mktemp +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' +++ local LAST_OUT=/tmp/tmp.hD9f3f0973 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.xRwVn1cmnf +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.hD9f3f0973 +++ cat /tmp/tmp.xRwVn1cmnf +++ rm /tmp/tmp.hD9f3f0973 /tmp/tmp.xRwVn1cmnf +++ return 0 ++ echo 1771513388 + local latest_ts=1771513388 + desc 'write more data before restore by date' + set +o xtrace ----------------------------------------------------------------------------------- write more data before restore by date ----------------------------------------------------------------------------------- + run_mongos 'use myApp\n db.test.insert({ x: 100501 })' myApp:myPass@some-name-mongos.pitr-physical-backup-source-22129 + local 'command=use myApp\n db.test.insert({ x: 100501 })' + local uri=myApp:myPass@some-name-mongos.pitr-physical-backup-source-22129 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local port=27017 + local mongo_bin=mongo ++ echo .svc.cluster.local ++ awk -F: '{print $2}' + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.8ci3QKLDjN +++ mktemp ++ local LAST_ERR=/tmp/tmp.KUyiPPUW7r ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) 
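The restore target handed to check_recovery above (1771513421, i.e. 2026-02-19 15:03:41 UTC) is simply the cluster's current time read through mongos right after the find comparison passed. Stripped of the retry and temp-file plumbing, that capture reduces to roughly the following (a sketch that reuses the client pod selector and URI visible in this trace; the noise filter is abbreviated):

    # Ask mongos for the current epoch seconds and drop the fractional part.
    client=$(kubectl get pods --selector=name=psmdb-client \
        -o 'jsonpath={.items[].metadata.name}')
    time_now=$(
        kubectl exec "$client" -- bash -c \
            "printf 'new Date().getTime() / 1000\n' | mongo 'mongodb://myApp:myPass@some-name-mongos.pitr-physical-backup-source-22129.svc.cluster.local:27017/admin' --quiet" \
        | grep -E -v 'I NETWORK|W NETWORK|Percona Server for MongoDB|connecting to:' \
        | cut -d. -f1
    )
    echo "restore target: ${time_now}"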
++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.8ci3QKLDjN ++ cat /tmp/tmp.KUyiPPUW7r ++ rm /tmp/tmp.8ci3QKLDjN /tmp/tmp.KUyiPPUW7r ++ return 0 + local client_container=psmdb-client-86cb5d8484-qqm2k + kubectl_bin exec psmdb-client-86cb5d8484-qqm2k -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100501 })\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.pitr-physical-backup-source-22129.svc.cluster.local:27017/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.8DPZN2DpDc ++ mktemp + local LAST_ERR=/tmp/tmp.9PeDMVAxhx + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-86cb5d8484-qqm2k -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100501 })\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.pitr-physical-backup-source-22129.svc.cluster.local:27017/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.8DPZN2DpDc Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-mongos.pitr-physical-backup-source-22129.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb Implicit session: session { "id" : UUID("b3fbe9e8-679e-4b48-9c70-6c9f7be67c5d") } Percona Server for MongoDB server version: v8.0.19-7 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.9PeDMVAxhx + rm /tmp/tmp.8DPZN2DpDc /tmp/tmp.9PeDMVAxhx + return 0 + [[ -n 1771513421 ]] ++ format_date 1771513421 ++ local timestamp=1771513421 +++ TZ=UTC +++ /usr/sbin/date -d@1771513421 '+%Y-%m-%d %H:%M:%S' ++ echo 2026-02-19 15:03:41 + desc 'Restoring to time 2026-02-19 15:03:41' + set +o xtrace ----------------------------------------------------------------------------------- Restoring to time 2026-02-19 15:03:41 ----------------------------------------------------------------------------------- + retries=0 + [[ 1771513388 -gt 1771513421 ]] + [[ 0 -gt 30 ]] ++ get_latest_oplog_chunk_ts some-name ++ local cluster=some-name +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.O0yOt0W6NM ++++ mktemp +++ local LAST_ERR=/tmp/tmp.vymeWzZCuE +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.O0yOt0W6NM +++ cat /tmp/tmp.vymeWzZCuE +++ rm /tmp/tmp.O0yOt0W6NM /tmp/tmp.vymeWzZCuE +++ return 0 ++ echo 1771513388 + latest_ts=1771513388 + retries=1 ++ format_date 1771513388 ++ local timestamp=1771513388 +++ TZ=UTC +++ /usr/sbin/date -d@1771513388 '+%Y-%m-%d %H:%M:%S' ++ echo 2026-02-19 15:03:08 ++ format_date 1771513421 ++ local timestamp=1771513421 +++ TZ=UTC +++ /usr/sbin/date -d@1771513421 '+%Y-%m-%d %H:%M:%S' ++ echo 2026-02-19 15:03:41 + echo 'Waiting for last oplog chunk (2026-02-19 15:03:08) to be greater than restore target (2026-02-19 15:03:41)' Waiting for last oplog chunk (2026-02-19 15:03:08) to be greater than restore target (2026-02-19 15:03:41) + sleep 10 + [[ 1771513388 -gt 1771513421 ]] + [[ 1 -gt 30 ]] ++ get_latest_oplog_chunk_ts some-name ++ local cluster=some-name +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- 
pbm status -o json ++++ mktemp +++ local LAST_OUT=/tmp/tmp.6Tu0gSYjbT ++++ mktemp +++ local LAST_ERR=/tmp/tmp.hlxsAmwaw2 +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.6Tu0gSYjbT +++ cat /tmp/tmp.hlxsAmwaw2 +++ rm /tmp/tmp.6Tu0gSYjbT /tmp/tmp.hlxsAmwaw2 +++ return 0 ++ echo 1771513388 + latest_ts=1771513388 + retries=2 ++ format_date 1771513388 ++ local timestamp=1771513388 +++ TZ=UTC +++ /usr/sbin/date -d@1771513388 '+%Y-%m-%d %H:%M:%S' ++ echo 2026-02-19 15:03:08 ++ format_date 1771513421 ++ local timestamp=1771513421 +++ TZ=UTC +++ /usr/sbin/date -d@1771513421 '+%Y-%m-%d %H:%M:%S' ++ echo 2026-02-19 15:03:41 + echo 'Waiting for last oplog chunk (2026-02-19 15:03:08) to be greater than restore target (2026-02-19 15:03:41)' Waiting for last oplog chunk (2026-02-19 15:03:08) to be greater than restore target (2026-02-19 15:03:41) + sleep 10 + [[ 1771513388 -gt 1771513421 ]] + [[ 2 -gt 30 ]] ++ get_latest_oplog_chunk_ts some-name ++ local cluster=some-name +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.fMxPtcSgnX ++++ mktemp +++ local LAST_ERR=/tmp/tmp.iRgXUQPrVa +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.fMxPtcSgnX +++ cat /tmp/tmp.iRgXUQPrVa +++ rm /tmp/tmp.fMxPtcSgnX /tmp/tmp.iRgXUQPrVa +++ return 0 ++ echo 1771513388 + latest_ts=1771513388 + retries=3 ++ format_date 1771513388 ++ local timestamp=1771513388 +++ TZ=UTC +++ /usr/sbin/date -d@1771513388 '+%Y-%m-%d %H:%M:%S' ++ echo 2026-02-19 15:03:08 ++ format_date 1771513421 ++ local timestamp=1771513421 +++ TZ=UTC +++ /usr/sbin/date -d@1771513421 '+%Y-%m-%d %H:%M:%S' ++ echo 2026-02-19 15:03:41 + echo 'Waiting for last oplog chunk (2026-02-19 15:03:08) to be greater than restore target (2026-02-19 15:03:41)' Waiting for last oplog chunk (2026-02-19 15:03:08) to be greater than restore target (2026-02-19 15:03:41) + sleep 10 + [[ 1771513388 -gt 1771513421 ]] + [[ 3 -gt 30 ]] ++ get_latest_oplog_chunk_ts some-name ++ local cluster=some-name +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.dFLHXvq11v ++++ mktemp +++ local LAST_ERR=/tmp/tmp.asFiaoPN8o +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.dFLHXvq11v +++ cat /tmp/tmp.asFiaoPN8o +++ rm /tmp/tmp.dFLHXvq11v /tmp/tmp.asFiaoPN8o +++ return 0 ++ echo 1771513388 + latest_ts=1771513388 + retries=4 ++ format_date 1771513388 ++ local timestamp=1771513388 +++ TZ=UTC +++ /usr/sbin/date -d@1771513388 '+%Y-%m-%d %H:%M:%S' ++ echo 2026-02-19 15:03:08 ++ format_date 1771513421 ++ local timestamp=1771513421 +++ TZ=UTC +++ /usr/sbin/date -d@1771513421 '+%Y-%m-%d %H:%M:%S' ++ echo 2026-02-19 15:03:41 + echo 'Waiting for last oplog chunk (2026-02-19 15:03:08) to be greater than restore target (2026-02-19 15:03:41)' Waiting for last oplog chunk (2026-02-19 
15:03:08) to be greater than restore target (2026-02-19 15:03:41) + sleep 10 + [[ 1771513388 -gt 1771513421 ]] + [[ 4 -gt 30 ]] ++ get_latest_oplog_chunk_ts some-name ++ local cluster=some-name +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.RtfvVynfSX ++++ mktemp +++ local LAST_ERR=/tmp/tmp.BLce1Pet1F +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.RtfvVynfSX +++ cat /tmp/tmp.BLce1Pet1F +++ rm /tmp/tmp.RtfvVynfSX /tmp/tmp.BLce1Pet1F +++ return 0 ++ echo 1771513388 + latest_ts=1771513388 + retries=5 ++ format_date 1771513388 ++ local timestamp=1771513388 +++ TZ=UTC +++ /usr/sbin/date -d@1771513388 '+%Y-%m-%d %H:%M:%S' ++ echo 2026-02-19 15:03:08 ++ format_date 1771513421 ++ local timestamp=1771513421 +++ TZ=UTC +++ /usr/sbin/date -d@1771513421 '+%Y-%m-%d %H:%M:%S' ++ echo 2026-02-19 15:03:41 + echo 'Waiting for last oplog chunk (2026-02-19 15:03:08) to be greater than restore target (2026-02-19 15:03:41)' Waiting for last oplog chunk (2026-02-19 15:03:08) to be greater than restore target (2026-02-19 15:03:41) + sleep 10 + [[ 1771513388 -gt 1771513421 ]] + [[ 5 -gt 30 ]] ++ get_latest_oplog_chunk_ts some-name ++ local cluster=some-name +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.5TFoIvlIKV ++++ mktemp +++ local LAST_ERR=/tmp/tmp.IxkRjFUJYt +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.5TFoIvlIKV +++ cat /tmp/tmp.IxkRjFUJYt +++ rm /tmp/tmp.5TFoIvlIKV /tmp/tmp.IxkRjFUJYt +++ return 0 ++ echo 1771513388 + latest_ts=1771513388 + retries=6 ++ format_date 1771513388 ++ local timestamp=1771513388 +++ TZ=UTC +++ /usr/sbin/date -d@1771513388 '+%Y-%m-%d %H:%M:%S' ++ echo 2026-02-19 15:03:08 ++ format_date 1771513421 ++ local timestamp=1771513421 +++ TZ=UTC +++ /usr/sbin/date -d@1771513421 '+%Y-%m-%d %H:%M:%S' ++ echo 2026-02-19 15:03:41 + echo 'Waiting for last oplog chunk (2026-02-19 15:03:08) to be greater than restore target (2026-02-19 15:03:41)' Waiting for last oplog chunk (2026-02-19 15:03:08) to be greater than restore target (2026-02-19 15:03:41) + sleep 10 + [[ 1771513388 -gt 1771513421 ]] + [[ 6 -gt 30 ]] ++ get_latest_oplog_chunk_ts some-name ++ local cluster=some-name +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.b3nKfx6xKF ++++ mktemp +++ local LAST_ERR=/tmp/tmp.32IqxeIs16 +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.b3nKfx6xKF +++ cat /tmp/tmp.32IqxeIs16 +++ rm /tmp/tmp.b3nKfx6xKF /tmp/tmp.32IqxeIs16 +++ return 0 ++ echo 1771513388 + latest_ts=1771513388 + retries=7 ++ format_date 1771513388 ++ local timestamp=1771513388 +++ TZ=UTC +++ /usr/sbin/date -d@1771513388 '+%Y-%m-%d %H:%M:%S' ++ echo 2026-02-19 15:03:08 
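Each retry traced here runs the same probe: read the end timestamp of the newest PITR oplog chunk from pbm on some-name-rs0-0 and compare it with the restore target. Condensed, the wait loop looks roughly like this (a sketch built only from the commands visible in the trace; the suite's real helpers may take extra arguments):

    get_latest_oplog_chunk_ts() {
        # End of the newest PITR oplog chunk, as epoch seconds.
        kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json \
            | jq '.backups.pitrChunks.pitrChunks | last | .range.end'
    }

    format_date() { TZ=UTC date -d "@$1" '+%Y-%m-%d %H:%M:%S'; }

    target=1771513421
    retries=0
    latest_ts=$(get_latest_oplog_chunk_ts)
    until [[ ${latest_ts} -gt ${target} ]]; do
        if [[ ${retries} -gt 30 ]]; then
            echo "last oplog chunk never passed the restore target" >&2
            exit 1
        fi
        echo "Waiting for last oplog chunk ($(format_date "${latest_ts}")) to be greater than restore target ($(format_date "${target}"))"
        sleep 10
        latest_ts=$(get_latest_oplog_chunk_ts)
        retries=$((retries + 1))
    done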
++ format_date 1771513421 ++ local timestamp=1771513421 +++ TZ=UTC +++ /usr/sbin/date -d@1771513421 '+%Y-%m-%d %H:%M:%S' ++ echo 2026-02-19 15:03:41 + echo 'Waiting for last oplog chunk (2026-02-19 15:03:08) to be greater than restore target (2026-02-19 15:03:41)' Waiting for last oplog chunk (2026-02-19 15:03:08) to be greater than restore target (2026-02-19 15:03:41) + sleep 10 + [[ 1771513388 -gt 1771513421 ]] + [[ 7 -gt 30 ]] ++ get_latest_oplog_chunk_ts some-name ++ local cluster=some-name +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.hHcK8vAxlJ ++++ mktemp +++ local LAST_ERR=/tmp/tmp.3U7sO5hytQ +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.hHcK8vAxlJ +++ cat /tmp/tmp.3U7sO5hytQ +++ rm /tmp/tmp.hHcK8vAxlJ /tmp/tmp.3U7sO5hytQ +++ return 0 ++ echo 1771513388 + latest_ts=1771513388 + retries=8 ++ format_date 1771513388 ++ local timestamp=1771513388 +++ TZ=UTC +++ /usr/sbin/date -d@1771513388 '+%Y-%m-%d %H:%M:%S' ++ echo 2026-02-19 15:03:08 ++ format_date 1771513421 ++ local timestamp=1771513421 +++ TZ=UTC +++ /usr/sbin/date -d@1771513421 '+%Y-%m-%d %H:%M:%S' ++ echo 2026-02-19 15:03:41 + echo 'Waiting for last oplog chunk (2026-02-19 15:03:08) to be greater than restore target (2026-02-19 15:03:41)' Waiting for last oplog chunk (2026-02-19 15:03:08) to be greater than restore target (2026-02-19 15:03:41) + sleep 10 + [[ 1771513388 -gt 1771513421 ]] + [[ 8 -gt 30 ]] ++ get_latest_oplog_chunk_ts some-name ++ local cluster=some-name +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.QAMNgj3HPJ ++++ mktemp +++ local LAST_ERR=/tmp/tmp.reZ2Tv9Ocl +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.QAMNgj3HPJ +++ cat /tmp/tmp.reZ2Tv9Ocl +++ rm /tmp/tmp.QAMNgj3HPJ /tmp/tmp.reZ2Tv9Ocl +++ return 0 ++ echo 1771513394 + latest_ts=1771513394 + retries=9 ++ format_date 1771513394 ++ local timestamp=1771513394 +++ TZ=UTC +++ /usr/sbin/date -d@1771513394 '+%Y-%m-%d %H:%M:%S' ++ echo 2026-02-19 15:03:14 ++ format_date 1771513421 ++ local timestamp=1771513421 +++ TZ=UTC +++ /usr/sbin/date -d@1771513421 '+%Y-%m-%d %H:%M:%S' ++ echo 2026-02-19 15:03:41 + echo 'Waiting for last oplog chunk (2026-02-19 15:03:14) to be greater than restore target (2026-02-19 15:03:41)' Waiting for last oplog chunk (2026-02-19 15:03:14) to be greater than restore target (2026-02-19 15:03:41) + sleep 10 + [[ 1771513394 -gt 1771513421 ]] + [[ 9 -gt 30 ]] ++ get_latest_oplog_chunk_ts some-name ++ local cluster=some-name +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.vPaGtgZ9BA ++++ mktemp +++ local LAST_ERR=/tmp/tmp.VUne6QBvwK +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat 
/tmp/tmp.vPaGtgZ9BA +++ cat /tmp/tmp.VUne6QBvwK +++ rm /tmp/tmp.vPaGtgZ9BA /tmp/tmp.VUne6QBvwK +++ return 0 ++ echo 1771513507 + latest_ts=1771513507 + retries=10 ++ format_date 1771513507 ++ local timestamp=1771513507 +++ TZ=UTC +++ /usr/sbin/date -d@1771513507 '+%Y-%m-%d %H:%M:%S' ++ echo 2026-02-19 15:05:07 ++ format_date 1771513421 ++ local timestamp=1771513421 +++ TZ=UTC +++ /usr/sbin/date -d@1771513421 '+%Y-%m-%d %H:%M:%S' ++ echo 2026-02-19 15:03:41 + echo 'Waiting for last oplog chunk (2026-02-19 15:05:07) to be greater than restore target (2026-02-19 15:03:41)' Waiting for last oplog chunk (2026-02-19 15:05:07) to be greater than restore target (2026-02-19 15:03:41) + sleep 10 + [[ 1771513507 -gt 1771513421 ]] + '[' -z backupSource ']' + desc 'check restore by date backupSource' + set +o xtrace ----------------------------------------------------------------------------------- check restore by date backupSource ----------------------------------------------------------------------------------- ++ get_backup_dest backup-minio-1 ++ local backup_name=backup-minio-1 ++ kubectl_bin get psmdb-backup backup-minio-1 -o 'jsonpath={.status.destination}' ++ sed -e 's/.json$//' ++ sed 's|s3://||' ++ sed 's|azure://||' +++ mktemp ++ sed 's|gs://||' ++ local LAST_OUT=/tmp/tmp.SWdBWEtqLr +++ mktemp ++ local LAST_ERR=/tmp/tmp.f5vXLho6nS ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb-backup backup-minio-1 -o 'jsonpath={.status.destination}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.SWdBWEtqLr ++ cat /tmp/tmp.f5vXLho6nS ++ rm /tmp/tmp.SWdBWEtqLr /tmp/tmp.f5vXLho6nS ++ return 0 + backup_dest=operator-testing/2026-02-19T15:00:40Z + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2247/e2e-tests/pitr-physical-backup-source/conf/restore.yml + /usr/sbin/sed -e 's/name:/name: restore-backup-minio-1/' + /usr/sbin/sed -e /backupName/d + /usr/sbin/sed -e 's/pitrType:/type: date/' + '[' -z 1771513421 ']' + /usr/sbin/sed -e 's|DESTINATION|operator-testing/2026-02-19T15:00:40Z|' ++ format_date 1771513421 ++ local timestamp=1771513421 ++ get_bucket_name backup-minio-1 ++ local backup_name=backup-minio-1 ++ kubectl_bin get psmdb-backup backup-minio-1 -o 'jsonpath={.status.s3.bucket}' + '[' -n '' ']' + yq +++ TZ=UTC +++ /usr/sbin/date -d@1771513421 '+%Y-%m-%d %H:%M:%S' +++ mktemp + kubectl_bin apply -f - ++ local LAST_OUT=/tmp/tmp.Wh69TzpDm9 ++ mktemp +++ mktemp ++ echo 2026-02-19 15:03:41 + /usr/sbin/sed -e 's/date:/date: 2026-02-19 15:03:41/' + local LAST_OUT=/tmp/tmp.rSeQGPxOML ++ local LAST_ERR=/tmp/tmp.yksX0LxPNs ++ local exit_status=0 ++ local timeout=4 ++ mktemp +++ seq 0 2 + local LAST_ERR=/tmp/tmp.LXEQnHbwLh + local exit_status=0 + local timeout=4 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb-backup backup-minio-1 -o 'jsonpath={.status.s3.bucket}' ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Wh69TzpDm9 ++ cat /tmp/tmp.yksX0LxPNs ++ rm /tmp/tmp.Wh69TzpDm9 /tmp/tmp.yksX0LxPNs ++ return 0 + /usr/sbin/sed -e 's|BUCKET-NAME|operator-testing|' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.rSeQGPxOML perconaservermongodbrestore.psmdb.percona.com/restore-backup-minio-1 created + cat /tmp/tmp.LXEQnHbwLh + rm /tmp/tmp.rSeQGPxOML /tmp/tmp.LXEQnHbwLh + return 0 + wait_restore backup-minio-1 some-name requested 0 1200 + local backup_name=backup-minio-1 + local cluster_name=some-name + 
local target_state=requested + local wait_cluster_consistency=0 + local wait_time=1200 + local ok_if_ready=0 + set +o xtrace Waiting for the psmdb-restore/restore-backup-minio-1 object to be created.OK Waiting psmdb-restore/restore-backup-minio-1 to reach state "requested" .................... ----------------------------------------------------------------------------------- pbm-agent logs from some-name-rs0-0 ----------------------------------------------------------------------------------- + kubectl_bin exec some-name-rs0-0 -- cat /tmp/pbm-agent.log ++ mktemp + local LAST_OUT=/tmp/tmp.y2tZvsdqj0 ++ mktemp + local LAST_ERR=/tmp/tmp.NzWp7YTyHG + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec some-name-rs0-0 -- cat /tmp/pbm-agent.log + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.y2tZvsdqj0 + cat /tmp/tmp.NzWp7YTyHG Defaulted container "mongod" out of: mongod, backup-agent, mongo-init (init) cat: /tmp/pbm-agent.log: No such file or directory command terminated with exit code 1 + sleep 0 + for i in $(seq 0 2) + set +e + kubectl exec some-name-rs0-0 -- cat /tmp/pbm-agent.log + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.y2tZvsdqj0 + cat /tmp/tmp.NzWp7YTyHG Defaulted container "mongod" out of: mongod, backup-agent, mongo-init (init) cat: /tmp/pbm-agent.log: No such file or directory command terminated with exit code 1 + sleep 4 + for i in $(seq 0 2) + set +e + kubectl exec some-name-rs0-0 -- cat /tmp/pbm-agent.log + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.y2tZvsdqj0 + cat /tmp/tmp.NzWp7YTyHG Defaulted container "mongod" out of: mongod, backup-agent, mongo-init (init) cat: /tmp/pbm-agent.log: No such file or directory command terminated with exit code 1 + sleep 8 + cat /tmp/tmp.y2tZvsdqj0 + cat /tmp/tmp.NzWp7YTyHG Defaulted container "mongod" out of: mongod, backup-agent, mongo-init (init) cat: /tmp/pbm-agent.log: No such file or directory command terminated with exit code 1 + rm /tmp/tmp.y2tZvsdqj0 /tmp/tmp.NzWp7YTyHG + return 1 + echo 'failed to get /tmp/pbm-agent.log from some-name-rs0-0' failed to get /tmp/pbm-agent.log from some-name-rs0-0 + for pod in $(get_mongod_pods ${cluster}) + desc 'pbm-agent logs from some-name-rs0-1' + set +o xtrace ----------------------------------------------------------------------------------- pbm-agent logs from some-name-rs0-1 ----------------------------------------------------------------------------------- + kubectl_bin exec some-name-rs0-1 -- cat /tmp/pbm-agent.log ++ mktemp + local LAST_OUT=/tmp/tmp.21pgUA9r07 ++ mktemp + local LAST_ERR=/tmp/tmp.zcM6DDF7Wb + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec some-name-rs0-1 -- cat /tmp/pbm-agent.log + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.21pgUA9r07 + cat /tmp/tmp.zcM6DDF7Wb Defaulted container "mongod" out of: mongod, backup-agent, mongo-init (init) cat: /tmp/pbm-agent.log: No such file or directory command terminated with exit code 1 + sleep 0 + for i in $(seq 0 2) + set +e + kubectl exec some-name-rs0-1 -- cat /tmp/pbm-agent.log + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.21pgUA9r07 + cat /tmp/tmp.zcM6DDF7Wb Defaulted container "mongod" out of: mongod, backup-agent, mongo-init (init) cat: /tmp/pbm-agent.log: No such file or directory command terminated with exit code 1 + sleep 4 + for i in $(seq 0 2) + set +e + kubectl exec some-name-rs0-1 -- cat /tmp/pbm-agent.log + exit_status=1 + set -e + 
'[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.21pgUA9r07 + cat /tmp/tmp.zcM6DDF7Wb Defaulted container "mongod" out of: mongod, backup-agent, mongo-init (init) cat: /tmp/pbm-agent.log: No such file or directory command terminated with exit code 1 + sleep 8 + cat /tmp/tmp.21pgUA9r07 + cat /tmp/tmp.zcM6DDF7Wb Defaulted container "mongod" out of: mongod, backup-agent, mongo-init (init) cat: /tmp/pbm-agent.log: No such file or directory command terminated with exit code 1 + rm /tmp/tmp.21pgUA9r07 /tmp/tmp.zcM6DDF7Wb + return 1 + echo 'failed to get /tmp/pbm-agent.log from some-name-rs0-1' failed to get /tmp/pbm-agent.log from some-name-rs0-1 + for pod in $(get_mongod_pods ${cluster}) + desc 'pbm-agent logs from some-name-rs0-2' + set +o xtrace ----------------------------------------------------------------------------------- pbm-agent logs from some-name-rs0-2 ----------------------------------------------------------------------------------- + kubectl_bin exec some-name-rs0-2 -- cat /tmp/pbm-agent.log ++ mktemp + local LAST_OUT=/tmp/tmp.UkHbJrp0GU ++ mktemp + local LAST_ERR=/tmp/tmp.W9HpUXMhef + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec some-name-rs0-2 -- cat /tmp/pbm-agent.log + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.UkHbJrp0GU + cat /tmp/tmp.W9HpUXMhef Defaulted container "mongod" out of: mongod, backup-agent, mongo-init (init) cat: /tmp/pbm-agent.log: No such file or directory command terminated with exit code 1 + sleep 0 + for i in $(seq 0 2) + set +e + kubectl exec some-name-rs0-2 -- cat /tmp/pbm-agent.log + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.UkHbJrp0GU + cat /tmp/tmp.W9HpUXMhef Defaulted container "mongod" out of: mongod, backup-agent, mongo-init (init) cat: /tmp/pbm-agent.log: No such file or directory command terminated with exit code 1 + sleep 4 + for i in $(seq 0 2) + set +e + kubectl exec some-name-rs0-2 -- cat /tmp/pbm-agent.log + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.UkHbJrp0GU + cat /tmp/tmp.W9HpUXMhef Defaulted container "mongod" out of: mongod, backup-agent, mongo-init (init) cat: /tmp/pbm-agent.log: No such file or directory command terminated with exit code 1 + sleep 8 + cat /tmp/tmp.UkHbJrp0GU + cat /tmp/tmp.W9HpUXMhef Defaulted container "mongod" out of: mongod, backup-agent, mongo-init (init) cat: /tmp/pbm-agent.log: No such file or directory command terminated with exit code 1 + rm /tmp/tmp.UkHbJrp0GU /tmp/tmp.W9HpUXMhef + return 1 + echo 'failed to get /tmp/pbm-agent.log from some-name-rs0-2' failed to get /tmp/pbm-agent.log from some-name-rs0-2 + for pod in $(get_mongod_pods ${cluster}) + desc 'pbm-agent logs from some-name-rs1-0' + set +o xtrace ----------------------------------------------------------------------------------- pbm-agent logs from some-name-rs1-0 ----------------------------------------------------------------------------------- + kubectl_bin exec some-name-rs1-0 -- cat /tmp/pbm-agent.log ++ mktemp + local LAST_OUT=/tmp/tmp.XWn57RyTJ3 ++ mktemp + local LAST_ERR=/tmp/tmp.v9Dk1WRETp + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec some-name-rs1-0 -- cat /tmp/pbm-agent.log + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.XWn57RyTJ3 + cat /tmp/tmp.v9Dk1WRETp Defaulted container "mongod" out of: mongod, backup-agent, mongo-init (init) cat: /tmp/pbm-agent.log: No such file or directory command terminated with exit code 1 + sleep 0 + for i in $(seq 0 2) + set +e + 
kubectl exec some-name-rs1-0 -- cat /tmp/pbm-agent.log + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.XWn57RyTJ3 + cat /tmp/tmp.v9Dk1WRETp Defaulted container "mongod" out of: mongod, backup-agent, mongo-init (init) cat: /tmp/pbm-agent.log: No such file or directory command terminated with exit code 1 + sleep 4 + for i in $(seq 0 2) + set +e + kubectl exec some-name-rs1-0 -- cat /tmp/pbm-agent.log + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.XWn57RyTJ3 + cat /tmp/tmp.v9Dk1WRETp Defaulted container "mongod" out of: mongod, backup-agent, mongo-init (init) cat: /tmp/pbm-agent.log: No such file or directory command terminated with exit code 1 + sleep 8 + cat /tmp/tmp.XWn57RyTJ3 + cat /tmp/tmp.v9Dk1WRETp Defaulted container "mongod" out of: mongod, backup-agent, mongo-init (init) cat: /tmp/pbm-agent.log: No such file or directory command terminated with exit code 1 + rm /tmp/tmp.XWn57RyTJ3 /tmp/tmp.v9Dk1WRETp + return 1 + echo 'failed to get /tmp/pbm-agent.log from some-name-rs1-0' failed to get /tmp/pbm-agent.log from some-name-rs1-0 + for pod in $(get_mongod_pods ${cluster}) + desc 'pbm-agent logs from some-name-rs1-1' + set +o xtrace ----------------------------------------------------------------------------------- pbm-agent logs from some-name-rs1-1 ----------------------------------------------------------------------------------- + kubectl_bin exec some-name-rs1-1 -- cat /tmp/pbm-agent.log ++ mktemp + local LAST_OUT=/tmp/tmp.duG4YeZ3Ij ++ mktemp + local LAST_ERR=/tmp/tmp.qA3wno0ijS + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec some-name-rs1-1 -- cat /tmp/pbm-agent.log + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.duG4YeZ3Ij + cat /tmp/tmp.qA3wno0ijS Defaulted container "mongod" out of: mongod, backup-agent, mongo-init (init) cat: /tmp/pbm-agent.log: No such file or directory command terminated with exit code 1 + sleep 0 + for i in $(seq 0 2) + set +e + kubectl exec some-name-rs1-1 -- cat /tmp/pbm-agent.log + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.duG4YeZ3Ij + cat /tmp/tmp.qA3wno0ijS Defaulted container "mongod" out of: mongod, backup-agent, mongo-init (init) cat: /tmp/pbm-agent.log: No such file or directory command terminated with exit code 1 + sleep 4 + for i in $(seq 0 2) + set +e + kubectl exec some-name-rs1-1 -- cat /tmp/pbm-agent.log + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.duG4YeZ3Ij + cat /tmp/tmp.qA3wno0ijS Defaulted container "mongod" out of: mongod, backup-agent, mongo-init (init) cat: /tmp/pbm-agent.log: No such file or directory command terminated with exit code 1 + sleep 8 + cat /tmp/tmp.duG4YeZ3Ij + cat /tmp/tmp.qA3wno0ijS Defaulted container "mongod" out of: mongod, backup-agent, mongo-init (init) cat: /tmp/pbm-agent.log: No such file or directory command terminated with exit code 1 + rm /tmp/tmp.duG4YeZ3Ij /tmp/tmp.qA3wno0ijS + return 1 + echo 'failed to get /tmp/pbm-agent.log from some-name-rs1-1' failed to get /tmp/pbm-agent.log from some-name-rs1-1 + for pod in $(get_mongod_pods ${cluster}) + desc 'pbm-agent logs from some-name-rs1-2' + set +o xtrace ----------------------------------------------------------------------------------- pbm-agent logs from some-name-rs1-2 ----------------------------------------------------------------------------------- + kubectl_bin exec some-name-rs1-2 -- cat /tmp/pbm-agent.log ++ mktemp + local LAST_OUT=/tmp/tmp.GLVVuInCd8 ++ mktemp + local LAST_ERR=/tmp/tmp.STOkGqnXQ5 + 
local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec some-name-rs1-2 -- cat /tmp/pbm-agent.log + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.GLVVuInCd8 + cat /tmp/tmp.STOkGqnXQ5 Defaulted container "mongod" out of: mongod, backup-agent, mongo-init (init) cat: /tmp/pbm-agent.log: No such file or directory command terminated with exit code 1 + sleep 0 + for i in $(seq 0 2) + set +e + kubectl exec some-name-rs1-2 -- cat /tmp/pbm-agent.log + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.GLVVuInCd8 + cat /tmp/tmp.STOkGqnXQ5 Defaulted container "mongod" out of: mongod, backup-agent, mongo-init (init) cat: /tmp/pbm-agent.log: No such file or directory command terminated with exit code 1 + sleep 4 + for i in $(seq 0 2) + set +e + kubectl exec some-name-rs1-2 -- cat /tmp/pbm-agent.log + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.GLVVuInCd8 + cat /tmp/tmp.STOkGqnXQ5 Defaulted container "mongod" out of: mongod, backup-agent, mongo-init (init) cat: /tmp/pbm-agent.log: No such file or directory command terminated with exit code 1 + sleep 8 + cat /tmp/tmp.GLVVuInCd8 + cat /tmp/tmp.STOkGqnXQ5 Defaulted container "mongod" out of: mongod, backup-agent, mongo-init (init) cat: /tmp/pbm-agent.log: No such file or directory command terminated with exit code 1 + rm /tmp/tmp.GLVVuInCd8 /tmp/tmp.STOkGqnXQ5 + return 1 + echo 'failed to get /tmp/pbm-agent.log from some-name-rs1-2' failed to get /tmp/pbm-agent.log from some-name-rs1-2 + for pod in $(get_mongod_pods ${cluster}) + desc 'pbm-agent logs from some-name-rs2-0' + set +o xtrace ----------------------------------------------------------------------------------- pbm-agent logs from some-name-rs2-0 ----------------------------------------------------------------------------------- + kubectl_bin exec some-name-rs2-0 -- cat /tmp/pbm-agent.log ++ mktemp + local LAST_OUT=/tmp/tmp.SsfnmjkZvE ++ mktemp + local LAST_ERR=/tmp/tmp.ZkMmjC978L + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec some-name-rs2-0 -- cat /tmp/pbm-agent.log + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.SsfnmjkZvE + cat /tmp/tmp.ZkMmjC978L Defaulted container "mongod" out of: mongod, backup-agent, mongo-init (init) cat: /tmp/pbm-agent.log: No such file or directory command terminated with exit code 1 + sleep 0 + for i in $(seq 0 2) + set +e + kubectl exec some-name-rs2-0 -- cat /tmp/pbm-agent.log + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.SsfnmjkZvE + cat /tmp/tmp.ZkMmjC978L Defaulted container "mongod" out of: mongod, backup-agent, mongo-init (init) cat: /tmp/pbm-agent.log: No such file or directory command terminated with exit code 1 + sleep 4 + for i in $(seq 0 2) + set +e + kubectl exec some-name-rs2-0 -- cat /tmp/pbm-agent.log + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.SsfnmjkZvE + cat /tmp/tmp.ZkMmjC978L Defaulted container "mongod" out of: mongod, backup-agent, mongo-init (init) cat: /tmp/pbm-agent.log: No such file or directory command terminated with exit code 1 + sleep 8 + cat /tmp/tmp.SsfnmjkZvE + cat /tmp/tmp.ZkMmjC978L Defaulted container "mongod" out of: mongod, backup-agent, mongo-init (init) cat: /tmp/pbm-agent.log: No such file or directory command terminated with exit code 1 + rm /tmp/tmp.SsfnmjkZvE /tmp/tmp.ZkMmjC978L + return 1 + echo 'failed to get /tmp/pbm-agent.log from some-name-rs2-0' failed to get /tmp/pbm-agent.log from some-name-rs2-0 + for pod in 
$(get_mongod_pods ${cluster}) + desc 'pbm-agent logs from some-name-rs2-1' + set +o xtrace ----------------------------------------------------------------------------------- pbm-agent logs from some-name-rs2-1 ----------------------------------------------------------------------------------- + kubectl_bin exec some-name-rs2-1 -- cat /tmp/pbm-agent.log ++ mktemp + local LAST_OUT=/tmp/tmp.i2trdfY73e ++ mktemp + local LAST_ERR=/tmp/tmp.MQir0Gylxh + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec some-name-rs2-1 -- cat /tmp/pbm-agent.log + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.i2trdfY73e + cat /tmp/tmp.MQir0Gylxh Defaulted container "mongod" out of: mongod, backup-agent, mongo-init (init) cat: /tmp/pbm-agent.log: No such file or directory command terminated with exit code 1 + sleep 0 + for i in $(seq 0 2) + set +e + kubectl exec some-name-rs2-1 -- cat /tmp/pbm-agent.log + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.i2trdfY73e + cat /tmp/tmp.MQir0Gylxh Defaulted container "mongod" out of: mongod, backup-agent, mongo-init (init) cat: /tmp/pbm-agent.log: No such file or directory command terminated with exit code 1 + sleep 4 + for i in $(seq 0 2) + set +e + kubectl exec some-name-rs2-1 -- cat /tmp/pbm-agent.log + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.i2trdfY73e + cat /tmp/tmp.MQir0Gylxh Defaulted container "mongod" out of: mongod, backup-agent, mongo-init (init) cat: /tmp/pbm-agent.log: No such file or directory command terminated with exit code 1 + sleep 8 + cat /tmp/tmp.i2trdfY73e + cat /tmp/tmp.MQir0Gylxh Defaulted container "mongod" out of: mongod, backup-agent, mongo-init (init) cat: /tmp/pbm-agent.log: No such file or directory command terminated with exit code 1 + rm /tmp/tmp.i2trdfY73e /tmp/tmp.MQir0Gylxh + return 1 + echo 'failed to get /tmp/pbm-agent.log from some-name-rs2-1' failed to get /tmp/pbm-agent.log from some-name-rs2-1 + for pod in $(get_mongod_pods ${cluster}) + desc 'pbm-agent logs from some-name-rs2-2' + set +o xtrace ----------------------------------------------------------------------------------- pbm-agent logs from some-name-rs2-2 ----------------------------------------------------------------------------------- + kubectl_bin exec some-name-rs2-2 -- cat /tmp/pbm-agent.log ++ mktemp + local LAST_OUT=/tmp/tmp.czTnKNdoz0 ++ mktemp + local LAST_ERR=/tmp/tmp.zrHTXVQlAy + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec some-name-rs2-2 -- cat /tmp/pbm-agent.log + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.czTnKNdoz0 + cat /tmp/tmp.zrHTXVQlAy Defaulted container "mongod" out of: mongod, backup-agent, mongo-init (init) cat: /tmp/pbm-agent.log: No such file or directory command terminated with exit code 1 + sleep 0 + for i in $(seq 0 2) + set +e + kubectl exec some-name-rs2-2 -- cat /tmp/pbm-agent.log + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.czTnKNdoz0 + cat /tmp/tmp.zrHTXVQlAy Defaulted container "mongod" out of: mongod, backup-agent, mongo-init (init) cat: /tmp/pbm-agent.log: No such file or directory command terminated with exit code 1 + sleep 4 + for i in $(seq 0 2) + set +e + kubectl exec some-name-rs2-2 -- cat /tmp/pbm-agent.log + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.czTnKNdoz0 + cat /tmp/tmp.zrHTXVQlAy Defaulted container "mongod" out of: mongod, backup-agent, mongo-init (init) cat: /tmp/pbm-agent.log: No such file or directory 
command terminated with exit code 1 + sleep 8 + cat /tmp/tmp.czTnKNdoz0 + cat /tmp/tmp.zrHTXVQlAy Defaulted container "mongod" out of: mongod, backup-agent, mongo-init (init) cat: /tmp/pbm-agent.log: No such file or directory command terminated with exit code 1 + rm /tmp/tmp.czTnKNdoz0 /tmp/tmp.zrHTXVQlAy + return 1 + echo 'failed to get /tmp/pbm-agent.log from some-name-rs2-2' failed to get /tmp/pbm-agent.log from some-name-rs2-2 + grep restore-backup-minio-1 + tail -100 ++ get_operator_pod ++ kubectl_bin get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.DjYiIKwzNs +++ mktemp ++ local LAST_ERR=/tmp/tmp.ddu28Zdv2e ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.DjYiIKwzNs ++ cat /tmp/tmp.ddu28Zdv2e ++ rm /tmp/tmp.DjYiIKwzNs /tmp/tmp.ddu28Zdv2e ++ return 0 + kubectl_bin logs -n psmdb-operator percona-server-mongodb-operator-64bc9b5fc9-mmkzl ++ mktemp + local LAST_OUT=/tmp/tmp.3ERpnaJucr ++ mktemp + local LAST_ERR=/tmp/tmp.iT19WEoZ3g + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl logs -n psmdb-operator percona-server-mongodb-operator-64bc9b5fc9-mmkzl + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.3ERpnaJucr + cat /tmp/tmp.iT19WEoZ3g + rm /tmp/tmp.3ERpnaJucr /tmp/tmp.iT19WEoZ3g + return 0 2026-02-19T15:24:39.473Z DEBUG checking if restore is allowed {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-1","namespace":"pitr-physical-backup-source-22129"}, "namespace": "pitr-physical-backup-source-22129", "name": "restore-backup-minio-1", "reconcileID": "f0b6bf05-4e31-47e7-a0df-fff8067c32cc", "cluster": "some-name", "namespace": "pitr-physical-backup-source-22129"} 2026-02-19T15:24:39.475Z INFO Waiting for statefulsets to be ready before restore {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-1","namespace":"pitr-physical-backup-source-22129"}, "namespace": "pitr-physical-backup-source-22129", "name": "restore-backup-minio-1", "reconcileID": "f0b6bf05-4e31-47e7-a0df-fff8067c32cc", "ready": false} 2026-02-19T15:24:44.476Z DEBUG checking if restore is allowed {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-1","namespace":"pitr-physical-backup-source-22129"}, "namespace": "pitr-physical-backup-source-22129", "name": "restore-backup-minio-1", "reconcileID": "a3d22174-7ec1-4af4-8786-c7d2193d87b2", "cluster": "some-name", "namespace": "pitr-physical-backup-source-22129"} 2026-02-19T15:24:44.477Z INFO Waiting for statefulsets to be ready before restore {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-1","namespace":"pitr-physical-backup-source-22129"}, "namespace": "pitr-physical-backup-source-22129", "name": 
"restore-backup-minio-1", "reconcileID": "a3d22174-7ec1-4af4-8786-c7d2193d87b2", "ready": false} 2026-02-19T15:24:49.478Z DEBUG checking if restore is allowed {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-1","namespace":"pitr-physical-backup-source-22129"}, "namespace": "pitr-physical-backup-source-22129", "name": "restore-backup-minio-1", "reconcileID": "340543e1-804c-4601-9d4f-c434780f0540", "cluster": "some-name", "namespace": "pitr-physical-backup-source-22129"} 2026-02-19T15:24:49.480Z INFO Waiting for statefulsets to be ready before restore {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-1","namespace":"pitr-physical-backup-source-22129"}, "namespace": "pitr-physical-backup-source-22129", "name": "restore-backup-minio-1", "reconcileID": "340543e1-804c-4601-9d4f-c434780f0540", "ready": false} 2026-02-19T15:24:54.481Z DEBUG checking if restore is allowed {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-1","namespace":"pitr-physical-backup-source-22129"}, "namespace": "pitr-physical-backup-source-22129", "name": "restore-backup-minio-1", "reconcileID": "1d59effe-e79f-4540-934a-dd93ce7fc9d5", "cluster": "some-name", "namespace": "pitr-physical-backup-source-22129"} 2026-02-19T15:24:54.482Z INFO Waiting for statefulsets to be ready before restore {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-1","namespace":"pitr-physical-backup-source-22129"}, "namespace": "pitr-physical-backup-source-22129", "name": "restore-backup-minio-1", "reconcileID": "1d59effe-e79f-4540-934a-dd93ce7fc9d5", "ready": false} 2026-02-19T15:24:59.483Z DEBUG checking if restore is allowed {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-1","namespace":"pitr-physical-backup-source-22129"}, "namespace": "pitr-physical-backup-source-22129", "name": "restore-backup-minio-1", "reconcileID": "855488db-c8e5-4224-93ed-6ae54b586b19", "cluster": "some-name", "namespace": "pitr-physical-backup-source-22129"} 2026-02-19T15:24:59.485Z INFO Waiting for statefulsets to be ready before restore {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-1","namespace":"pitr-physical-backup-source-22129"}, "namespace": "pitr-physical-backup-source-22129", "name": "restore-backup-minio-1", "reconcileID": "855488db-c8e5-4224-93ed-6ae54b586b19", "ready": false} 2026-02-19T15:25:04.486Z DEBUG checking if restore is allowed {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-1","namespace":"pitr-physical-backup-source-22129"}, "namespace": "pitr-physical-backup-source-22129", "name": "restore-backup-minio-1", "reconcileID": "ec538a3b-57f9-4a6d-8ae7-e294127ea281", 
"cluster": "some-name", "namespace": "pitr-physical-backup-source-22129"} 2026-02-19T15:25:04.488Z INFO Waiting for statefulsets to be ready before restore {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-1","namespace":"pitr-physical-backup-source-22129"}, "namespace": "pitr-physical-backup-source-22129", "name": "restore-backup-minio-1", "reconcileID": "ec538a3b-57f9-4a6d-8ae7-e294127ea281", "ready": false} 2026-02-19T15:25:09.488Z DEBUG checking if restore is allowed {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-1","namespace":"pitr-physical-backup-source-22129"}, "namespace": "pitr-physical-backup-source-22129", "name": "restore-backup-minio-1", "reconcileID": "bb8e7f16-ad8a-4ae6-8038-dbdee141375b", "cluster": "some-name", "namespace": "pitr-physical-backup-source-22129"} 2026-02-19T15:25:09.491Z INFO Waiting for statefulsets to be ready before restore {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-1","namespace":"pitr-physical-backup-source-22129"}, "namespace": "pitr-physical-backup-source-22129", "name": "restore-backup-minio-1", "reconcileID": "bb8e7f16-ad8a-4ae6-8038-dbdee141375b", "ready": false} 2026-02-19T15:25:14.491Z DEBUG checking if restore is allowed {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-1","namespace":"pitr-physical-backup-source-22129"}, "namespace": "pitr-physical-backup-source-22129", "name": "restore-backup-minio-1", "reconcileID": "fc09b052-87de-4761-9c87-58ff4d16e4c7", "cluster": "some-name", "namespace": "pitr-physical-backup-source-22129"} 2026-02-19T15:25:14.494Z INFO Waiting for statefulsets to be ready before restore {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-1","namespace":"pitr-physical-backup-source-22129"}, "namespace": "pitr-physical-backup-source-22129", "name": "restore-backup-minio-1", "reconcileID": "fc09b052-87de-4761-9c87-58ff4d16e4c7", "ready": false} 2026-02-19T15:25:19.494Z DEBUG checking if restore is allowed {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-1","namespace":"pitr-physical-backup-source-22129"}, "namespace": "pitr-physical-backup-source-22129", "name": "restore-backup-minio-1", "reconcileID": "6ed82eda-79fd-4e35-8a45-adff750de73e", "cluster": "some-name", "namespace": "pitr-physical-backup-source-22129"} 2026-02-19T15:25:19.496Z INFO Waiting for statefulsets to be ready before restore {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-1","namespace":"pitr-physical-backup-source-22129"}, "namespace": "pitr-physical-backup-source-22129", "name": "restore-backup-minio-1", "reconcileID": "6ed82eda-79fd-4e35-8a45-adff750de73e", "ready": 
false} 2026-02-19T15:25:24.497Z DEBUG checking if restore is allowed {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-1","namespace":"pitr-physical-backup-source-22129"}, "namespace": "pitr-physical-backup-source-22129", "name": "restore-backup-minio-1", "reconcileID": "512c5755-920f-4a4f-9459-3256d470f65a", "cluster": "some-name", "namespace": "pitr-physical-backup-source-22129"} 2026-02-19T15:25:24.500Z INFO Waiting for statefulsets to be ready before restore {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-1","namespace":"pitr-physical-backup-source-22129"}, "namespace": "pitr-physical-backup-source-22129", "name": "restore-backup-minio-1", "reconcileID": "512c5755-920f-4a4f-9459-3256d470f65a", "ready": false} 2026-02-19T15:25:29.501Z DEBUG checking if restore is allowed {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-1","namespace":"pitr-physical-backup-source-22129"}, "namespace": "pitr-physical-backup-source-22129", "name": "restore-backup-minio-1", "reconcileID": "299e1a4b-f131-4b26-b92d-d3509b4b5f7a", "cluster": "some-name", "namespace": "pitr-physical-backup-source-22129"} 2026-02-19T15:25:29.502Z INFO Waiting for statefulsets to be ready before restore {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-1","namespace":"pitr-physical-backup-source-22129"}, "namespace": "pitr-physical-backup-source-22129", "name": "restore-backup-minio-1", "reconcileID": "299e1a4b-f131-4b26-b92d-d3509b4b5f7a", "ready": false} 2026-02-19T15:25:34.503Z DEBUG checking if restore is allowed {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-1","namespace":"pitr-physical-backup-source-22129"}, "namespace": "pitr-physical-backup-source-22129", "name": "restore-backup-minio-1", "reconcileID": "c0629919-4487-459b-839b-f852fc1d8bc7", "cluster": "some-name", "namespace": "pitr-physical-backup-source-22129"} 2026-02-19T15:25:34.505Z INFO Waiting for statefulsets to be ready before restore {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-1","namespace":"pitr-physical-backup-source-22129"}, "namespace": "pitr-physical-backup-source-22129", "name": "restore-backup-minio-1", "reconcileID": "c0629919-4487-459b-839b-f852fc1d8bc7", "ready": false} 2026-02-19T15:25:39.506Z DEBUG checking if restore is allowed {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-1","namespace":"pitr-physical-backup-source-22129"}, "namespace": "pitr-physical-backup-source-22129", "name": "restore-backup-minio-1", "reconcileID": "840fd82d-1909-4cae-9ec3-762c3ec012e7", "cluster": "some-name", "namespace": "pitr-physical-backup-source-22129"} 
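At this stage the restore object exists, but it never leaves the "requested" wait because, as the operator keeps reporting, the cluster's statefulsets are not ready. While the test polls, both sides can be checked with plain kubectl (a sketch: the .status.state path is inferred from the 'to reach state "requested"' message above, and the custom-columns expressions are assumptions, not part of the test suite):

    ns=pitr-physical-backup-source-22129

    # Restore phase as reported by the operator (expected to move to "requested").
    kubectl -n "$ns" get psmdb-restore restore-backup-minio-1 \
        -o jsonpath='{.status.state}{"\n"}'

    # Readiness of the statefulsets the operator is waiting on.
    kubectl -n "$ns" get sts \
        -o custom-columns=NAME:.metadata.name,READY:.status.readyReplicas,WANTED:.spec.replicas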
2026-02-19T15:25:39.507Z INFO Waiting for statefulsets to be ready before restore {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-1","namespace":"pitr-physical-backup-source-22129"}, "namespace": "pitr-physical-backup-source-22129", "name": "restore-backup-minio-1", "reconcileID": "840fd82d-1909-4cae-9ec3-762c3ec012e7", "ready": false} 2026-02-19T15:25:44.508Z DEBUG checking if restore is allowed {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-1","namespace":"pitr-physical-backup-source-22129"}, "namespace": "pitr-physical-backup-source-22129", "name": "restore-backup-minio-1", "reconcileID": "1441b721-0569-4826-88c2-fc1d85f50266", "cluster": "some-name", "namespace": "pitr-physical-backup-source-22129"} 2026-02-19T15:25:44.511Z INFO Waiting for statefulsets to be ready before restore {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-1","namespace":"pitr-physical-backup-source-22129"}, "namespace": "pitr-physical-backup-source-22129", "name": "restore-backup-minio-1", "reconcileID": "1441b721-0569-4826-88c2-fc1d85f50266", "ready": false} 2026-02-19T15:25:49.511Z DEBUG checking if restore is allowed {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-1","namespace":"pitr-physical-backup-source-22129"}, "namespace": "pitr-physical-backup-source-22129", "name": "restore-backup-minio-1", "reconcileID": "74ede34c-f959-4d00-aad2-35783e2f5431", "cluster": "some-name", "namespace": "pitr-physical-backup-source-22129"} 2026-02-19T15:25:49.513Z INFO Waiting for statefulsets to be ready before restore {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-1","namespace":"pitr-physical-backup-source-22129"}, "namespace": "pitr-physical-backup-source-22129", "name": "restore-backup-minio-1", "reconcileID": "74ede34c-f959-4d00-aad2-35783e2f5431", "ready": false} 2026-02-19T15:25:54.513Z DEBUG checking if restore is allowed {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-1","namespace":"pitr-physical-backup-source-22129"}, "namespace": "pitr-physical-backup-source-22129", "name": "restore-backup-minio-1", "reconcileID": "54c3b8e3-76fa-4b14-821b-7fba330734c3", "cluster": "some-name", "namespace": "pitr-physical-backup-source-22129"} 2026-02-19T15:25:54.515Z INFO Waiting for statefulsets to be ready before restore {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-1","namespace":"pitr-physical-backup-source-22129"}, "namespace": "pitr-physical-backup-source-22129", "name": "restore-backup-minio-1", "reconcileID": "54c3b8e3-76fa-4b14-821b-7fba330734c3", "ready": false} 2026-02-19T15:25:59.515Z DEBUG checking if restore is allowed 
{"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-1","namespace":"pitr-physical-backup-source-22129"}, "namespace": "pitr-physical-backup-source-22129", "name": "restore-backup-minio-1", "reconcileID": "f7fb9624-203a-475d-a27b-38286855f085", "cluster": "some-name", "namespace": "pitr-physical-backup-source-22129"} 2026-02-19T15:25:59.517Z INFO Waiting for statefulsets to be ready before restore {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-1","namespace":"pitr-physical-backup-source-22129"}, "namespace": "pitr-physical-backup-source-22129", "name": "restore-backup-minio-1", "reconcileID": "f7fb9624-203a-475d-a27b-38286855f085", "ready": false} 2026-02-19T15:26:04.518Z DEBUG checking if restore is allowed {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-1","namespace":"pitr-physical-backup-source-22129"}, "namespace": "pitr-physical-backup-source-22129", "name": "restore-backup-minio-1", "reconcileID": "4c4942d8-a6b7-42d1-9fb9-7d47f1bab2f6", "cluster": "some-name", "namespace": "pitr-physical-backup-source-22129"} 2026-02-19T15:26:04.520Z INFO Waiting for statefulsets to be ready before restore {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-1","namespace":"pitr-physical-backup-source-22129"}, "namespace": "pitr-physical-backup-source-22129", "name": "restore-backup-minio-1", "reconcileID": "4c4942d8-a6b7-42d1-9fb9-7d47f1bab2f6", "ready": false} 2026-02-19T15:26:09.521Z DEBUG checking if restore is allowed {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-1","namespace":"pitr-physical-backup-source-22129"}, "namespace": "pitr-physical-backup-source-22129", "name": "restore-backup-minio-1", "reconcileID": "8a47fb14-5627-41d9-946e-35729bd5edb4", "cluster": "some-name", "namespace": "pitr-physical-backup-source-22129"} 2026-02-19T15:26:09.524Z INFO Waiting for statefulsets to be ready before restore {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-1","namespace":"pitr-physical-backup-source-22129"}, "namespace": "pitr-physical-backup-source-22129", "name": "restore-backup-minio-1", "reconcileID": "8a47fb14-5627-41d9-946e-35729bd5edb4", "ready": false} 2026-02-19T15:26:14.525Z DEBUG checking if restore is allowed {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-1","namespace":"pitr-physical-backup-source-22129"}, "namespace": "pitr-physical-backup-source-22129", "name": "restore-backup-minio-1", "reconcileID": "0fc1e3f6-5582-4828-8e70-441f104688fb", "cluster": "some-name", "namespace": "pitr-physical-backup-source-22129"} 2026-02-19T15:26:14.527Z INFO Waiting for statefulsets to be ready before restore 
{"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-1","namespace":"pitr-physical-backup-source-22129"}, "namespace": "pitr-physical-backup-source-22129", "name": "restore-backup-minio-1", "reconcileID": "0fc1e3f6-5582-4828-8e70-441f104688fb", "ready": false} 2026-02-19T15:26:19.528Z DEBUG checking if restore is allowed {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-1","namespace":"pitr-physical-backup-source-22129"}, "namespace": "pitr-physical-backup-source-22129", "name": "restore-backup-minio-1", "reconcileID": "9e9533c2-b016-4101-a850-b19a73e50c3c", "cluster": "some-name", "namespace": "pitr-physical-backup-source-22129"} 2026-02-19T15:26:19.530Z INFO Waiting for statefulsets to be ready before restore {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-1","namespace":"pitr-physical-backup-source-22129"}, "namespace": "pitr-physical-backup-source-22129", "name": "restore-backup-minio-1", "reconcileID": "9e9533c2-b016-4101-a850-b19a73e50c3c", "ready": false} 2026-02-19T15:26:24.531Z DEBUG checking if restore is allowed {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-1","namespace":"pitr-physical-backup-source-22129"}, "namespace": "pitr-physical-backup-source-22129", "name": "restore-backup-minio-1", "reconcileID": "a48a0a05-8099-4fa6-a8ae-eefca9903f09", "cluster": "some-name", "namespace": "pitr-physical-backup-source-22129"} 2026-02-19T15:26:24.533Z INFO Waiting for statefulsets to be ready before restore {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-1","namespace":"pitr-physical-backup-source-22129"}, "namespace": "pitr-physical-backup-source-22129", "name": "restore-backup-minio-1", "reconcileID": "a48a0a05-8099-4fa6-a8ae-eefca9903f09", "ready": false} 2026-02-19T15:26:29.534Z DEBUG checking if restore is allowed {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-1","namespace":"pitr-physical-backup-source-22129"}, "namespace": "pitr-physical-backup-source-22129", "name": "restore-backup-minio-1", "reconcileID": "889b3c9b-ea1b-4ec2-8d41-123633e372ce", "cluster": "some-name", "namespace": "pitr-physical-backup-source-22129"} 2026-02-19T15:26:29.536Z INFO Waiting for statefulsets to be ready before restore {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-1","namespace":"pitr-physical-backup-source-22129"}, "namespace": "pitr-physical-backup-source-22129", "name": "restore-backup-minio-1", "reconcileID": "889b3c9b-ea1b-4ec2-8d41-123633e372ce", "ready": false} 2026-02-19T15:26:34.536Z DEBUG checking if restore is allowed {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", 
"controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-1","namespace":"pitr-physical-backup-source-22129"}, "namespace": "pitr-physical-backup-source-22129", "name": "restore-backup-minio-1", "reconcileID": "86dbcc11-b35b-4dfb-a580-00899699a6bb", "cluster": "some-name", "namespace": "pitr-physical-backup-source-22129"} 2026-02-19T15:26:34.539Z INFO Waiting for statefulsets to be ready before restore {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-1","namespace":"pitr-physical-backup-source-22129"}, "namespace": "pitr-physical-backup-source-22129", "name": "restore-backup-minio-1", "reconcileID": "86dbcc11-b35b-4dfb-a580-00899699a6bb", "ready": false} 2026-02-19T15:26:39.540Z DEBUG checking if restore is allowed {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-1","namespace":"pitr-physical-backup-source-22129"}, "namespace": "pitr-physical-backup-source-22129", "name": "restore-backup-minio-1", "reconcileID": "3af2ee25-79a0-442c-921d-d00074240892", "cluster": "some-name", "namespace": "pitr-physical-backup-source-22129"} 2026-02-19T15:26:39.541Z INFO Waiting for statefulsets to be ready before restore {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-1","namespace":"pitr-physical-backup-source-22129"}, "namespace": "pitr-physical-backup-source-22129", "name": "restore-backup-minio-1", "reconcileID": "3af2ee25-79a0-442c-921d-d00074240892", "ready": false} 2026-02-19T15:26:44.542Z DEBUG checking if restore is allowed {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-1","namespace":"pitr-physical-backup-source-22129"}, "namespace": "pitr-physical-backup-source-22129", "name": "restore-backup-minio-1", "reconcileID": "d0bcb9f2-8663-4aed-a6c6-4252c1b36614", "cluster": "some-name", "namespace": "pitr-physical-backup-source-22129"} 2026-02-19T15:26:44.543Z INFO Waiting for statefulsets to be ready before restore {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-1","namespace":"pitr-physical-backup-source-22129"}, "namespace": "pitr-physical-backup-source-22129", "name": "restore-backup-minio-1", "reconcileID": "d0bcb9f2-8663-4aed-a6c6-4252c1b36614", "ready": false} 2026-02-19T15:26:49.544Z DEBUG checking if restore is allowed {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-1","namespace":"pitr-physical-backup-source-22129"}, "namespace": "pitr-physical-backup-source-22129", "name": "restore-backup-minio-1", "reconcileID": "073df305-fec3-4885-8efd-bf8a3bd138c4", "cluster": "some-name", "namespace": "pitr-physical-backup-source-22129"} 2026-02-19T15:26:49.546Z INFO Waiting for statefulsets to be ready before restore {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", 
"controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-1","namespace":"pitr-physical-backup-source-22129"}, "namespace": "pitr-physical-backup-source-22129", "name": "restore-backup-minio-1", "reconcileID": "073df305-fec3-4885-8efd-bf8a3bd138c4", "ready": false} 2026-02-19T15:26:54.548Z DEBUG checking if restore is allowed {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-1","namespace":"pitr-physical-backup-source-22129"}, "namespace": "pitr-physical-backup-source-22129", "name": "restore-backup-minio-1", "reconcileID": "d9c76821-3b2d-4610-9834-945aeccf966a", "cluster": "some-name", "namespace": "pitr-physical-backup-source-22129"} 2026-02-19T15:26:54.550Z INFO Waiting for statefulsets to be ready before restore {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-1","namespace":"pitr-physical-backup-source-22129"}, "namespace": "pitr-physical-backup-source-22129", "name": "restore-backup-minio-1", "reconcileID": "d9c76821-3b2d-4610-9834-945aeccf966a", "ready": false} 2026-02-19T15:26:59.551Z DEBUG checking if restore is allowed {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-1","namespace":"pitr-physical-backup-source-22129"}, "namespace": "pitr-physical-backup-source-22129", "name": "restore-backup-minio-1", "reconcileID": "f3d254b0-26ce-429e-a51a-b8b0fe1ed489", "cluster": "some-name", "namespace": "pitr-physical-backup-source-22129"} 2026-02-19T15:26:59.553Z INFO Waiting for statefulsets to be ready before restore {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-1","namespace":"pitr-physical-backup-source-22129"}, "namespace": "pitr-physical-backup-source-22129", "name": "restore-backup-minio-1", "reconcileID": "f3d254b0-26ce-429e-a51a-b8b0fe1ed489", "ready": false} 2026-02-19T15:27:04.554Z DEBUG checking if restore is allowed {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-1","namespace":"pitr-physical-backup-source-22129"}, "namespace": "pitr-physical-backup-source-22129", "name": "restore-backup-minio-1", "reconcileID": "bc9bac23-5917-43ba-96df-385b3e42075a", "cluster": "some-name", "namespace": "pitr-physical-backup-source-22129"} 2026-02-19T15:27:04.555Z INFO Waiting for statefulsets to be ready before restore {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-1","namespace":"pitr-physical-backup-source-22129"}, "namespace": "pitr-physical-backup-source-22129", "name": "restore-backup-minio-1", "reconcileID": "bc9bac23-5917-43ba-96df-385b3e42075a", "ready": false} 2026-02-19T15:27:09.556Z DEBUG checking if restore is allowed {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": 
{"name":"restore-backup-minio-1","namespace":"pitr-physical-backup-source-22129"}, "namespace": "pitr-physical-backup-source-22129", "name": "restore-backup-minio-1", "reconcileID": "d5baf49e-10e2-48c1-b32d-0e24baf00023", "cluster": "some-name", "namespace": "pitr-physical-backup-source-22129"} 2026-02-19T15:27:09.557Z INFO Waiting for statefulsets to be ready before restore {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-1","namespace":"pitr-physical-backup-source-22129"}, "namespace": "pitr-physical-backup-source-22129", "name": "restore-backup-minio-1", "reconcileID": "d5baf49e-10e2-48c1-b32d-0e24baf00023", "ready": false} 2026-02-19T15:27:14.558Z DEBUG checking if restore is allowed {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-1","namespace":"pitr-physical-backup-source-22129"}, "namespace": "pitr-physical-backup-source-22129", "name": "restore-backup-minio-1", "reconcileID": "2add0ded-64a7-47f3-a8f6-2d656b8e5750", "cluster": "some-name", "namespace": "pitr-physical-backup-source-22129"} 2026-02-19T15:27:14.560Z INFO Waiting for statefulsets to be ready before restore {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-1","namespace":"pitr-physical-backup-source-22129"}, "namespace": "pitr-physical-backup-source-22129", "name": "restore-backup-minio-1", "reconcileID": "2add0ded-64a7-47f3-a8f6-2d656b8e5750", "ready": false} 2026-02-19T15:27:19.561Z DEBUG checking if restore is allowed {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-1","namespace":"pitr-physical-backup-source-22129"}, "namespace": "pitr-physical-backup-source-22129", "name": "restore-backup-minio-1", "reconcileID": "b77f7469-00ba-4280-b207-82ac9bce2c39", "cluster": "some-name", "namespace": "pitr-physical-backup-source-22129"} 2026-02-19T15:27:19.562Z INFO Waiting for statefulsets to be ready before restore {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-1","namespace":"pitr-physical-backup-source-22129"}, "namespace": "pitr-physical-backup-source-22129", "name": "restore-backup-minio-1", "reconcileID": "b77f7469-00ba-4280-b207-82ac9bce2c39", "ready": false} 2026-02-19T15:27:24.564Z DEBUG checking if restore is allowed {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-1","namespace":"pitr-physical-backup-source-22129"}, "namespace": "pitr-physical-backup-source-22129", "name": "restore-backup-minio-1", "reconcileID": "76bf1375-3c0f-4926-bfb7-7827a7218fc4", "cluster": "some-name", "namespace": "pitr-physical-backup-source-22129"} 2026-02-19T15:27:24.571Z INFO Waiting for statefulsets to be ready before restore {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": 
{"name":"restore-backup-minio-1","namespace":"pitr-physical-backup-source-22129"}, "namespace": "pitr-physical-backup-source-22129", "name": "restore-backup-minio-1", "reconcileID": "76bf1375-3c0f-4926-bfb7-7827a7218fc4", "ready": false} 2026-02-19T15:27:29.572Z DEBUG checking if restore is allowed {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-1","namespace":"pitr-physical-backup-source-22129"}, "namespace": "pitr-physical-backup-source-22129", "name": "restore-backup-minio-1", "reconcileID": "af27136d-aea4-40c7-a2e7-e6a4529909b5", "cluster": "some-name", "namespace": "pitr-physical-backup-source-22129"} 2026-02-19T15:27:29.573Z INFO Waiting for statefulsets to be ready before restore {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-1","namespace":"pitr-physical-backup-source-22129"}, "namespace": "pitr-physical-backup-source-22129", "name": "restore-backup-minio-1", "reconcileID": "af27136d-aea4-40c7-a2e7-e6a4529909b5", "ready": false} 2026-02-19T15:27:34.574Z DEBUG checking if restore is allowed {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-1","namespace":"pitr-physical-backup-source-22129"}, "namespace": "pitr-physical-backup-source-22129", "name": "restore-backup-minio-1", "reconcileID": "9522ebb5-cee0-48c1-8f64-eb7313fad751", "cluster": "some-name", "namespace": "pitr-physical-backup-source-22129"} 2026-02-19T15:27:34.576Z INFO Waiting for statefulsets to be ready before restore {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-1","namespace":"pitr-physical-backup-source-22129"}, "namespace": "pitr-physical-backup-source-22129", "name": "restore-backup-minio-1", "reconcileID": "9522ebb5-cee0-48c1-8f64-eb7313fad751", "ready": false} 2026-02-19T15:27:39.577Z DEBUG checking if restore is allowed {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-1","namespace":"pitr-physical-backup-source-22129"}, "namespace": "pitr-physical-backup-source-22129", "name": "restore-backup-minio-1", "reconcileID": "a5984819-05d1-4946-a854-8e9b7ceaf05a", "cluster": "some-name", "namespace": "pitr-physical-backup-source-22129"} 2026-02-19T15:27:39.579Z INFO Waiting for statefulsets to be ready before restore {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-1","namespace":"pitr-physical-backup-source-22129"}, "namespace": "pitr-physical-backup-source-22129", "name": "restore-backup-minio-1", "reconcileID": "a5984819-05d1-4946-a854-8e9b7ceaf05a", "ready": false} 2026-02-19T15:27:44.580Z DEBUG checking if restore is allowed {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-1","namespace":"pitr-physical-backup-source-22129"}, 
"namespace": "pitr-physical-backup-source-22129", "name": "restore-backup-minio-1", "reconcileID": "6aba978a-68c3-478c-abde-9ce61278f27f", "cluster": "some-name", "namespace": "pitr-physical-backup-source-22129"} 2026-02-19T15:27:44.582Z INFO Waiting for statefulsets to be ready before restore {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-1","namespace":"pitr-physical-backup-source-22129"}, "namespace": "pitr-physical-backup-source-22129", "name": "restore-backup-minio-1", "reconcileID": "6aba978a-68c3-478c-abde-9ce61278f27f", "ready": false} 2026-02-19T15:27:49.582Z DEBUG checking if restore is allowed {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-1","namespace":"pitr-physical-backup-source-22129"}, "namespace": "pitr-physical-backup-source-22129", "name": "restore-backup-minio-1", "reconcileID": "03e96dd9-633b-44b5-ab5c-6a1009c692b7", "cluster": "some-name", "namespace": "pitr-physical-backup-source-22129"} 2026-02-19T15:27:49.584Z INFO Waiting for statefulsets to be ready before restore {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-1","namespace":"pitr-physical-backup-source-22129"}, "namespace": "pitr-physical-backup-source-22129", "name": "restore-backup-minio-1", "reconcileID": "03e96dd9-633b-44b5-ab5c-6a1009c692b7", "ready": false} 2026-02-19T15:27:54.584Z DEBUG checking if restore is allowed {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-1","namespace":"pitr-physical-backup-source-22129"}, "namespace": "pitr-physical-backup-source-22129", "name": "restore-backup-minio-1", "reconcileID": "834fc1c6-79da-40f1-b88a-bcf2281896c9", "cluster": "some-name", "namespace": "pitr-physical-backup-source-22129"} 2026-02-19T15:27:54.586Z INFO Waiting for statefulsets to be ready before restore {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-1","namespace":"pitr-physical-backup-source-22129"}, "namespace": "pitr-physical-backup-source-22129", "name": "restore-backup-minio-1", "reconcileID": "834fc1c6-79da-40f1-b88a-bcf2281896c9", "ready": false} 2026-02-19T15:27:59.586Z DEBUG checking if restore is allowed {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-1","namespace":"pitr-physical-backup-source-22129"}, "namespace": "pitr-physical-backup-source-22129", "name": "restore-backup-minio-1", "reconcileID": "acc67177-9b7d-4b9c-9755-4b3c82f12497", "cluster": "some-name", "namespace": "pitr-physical-backup-source-22129"} 2026-02-19T15:27:59.588Z INFO Waiting for statefulsets to be ready before restore {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-1","namespace":"pitr-physical-backup-source-22129"}, 
"namespace": "pitr-physical-backup-source-22129", "name": "restore-backup-minio-1", "reconcileID": "acc67177-9b7d-4b9c-9755-4b3c82f12497", "ready": false} 2026-02-19T15:28:04.589Z DEBUG checking if restore is allowed {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-1","namespace":"pitr-physical-backup-source-22129"}, "namespace": "pitr-physical-backup-source-22129", "name": "restore-backup-minio-1", "reconcileID": "4b4c672f-1d7c-4ca9-9ff6-2759bb51eea8", "cluster": "some-name", "namespace": "pitr-physical-backup-source-22129"} 2026-02-19T15:28:04.590Z INFO Waiting for statefulsets to be ready before restore {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-1","namespace":"pitr-physical-backup-source-22129"}, "namespace": "pitr-physical-backup-source-22129", "name": "restore-backup-minio-1", "reconcileID": "4b4c672f-1d7c-4ca9-9ff6-2759bb51eea8", "ready": false} 2026-02-19T15:28:09.591Z DEBUG checking if restore is allowed {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-1","namespace":"pitr-physical-backup-source-22129"}, "namespace": "pitr-physical-backup-source-22129", "name": "restore-backup-minio-1", "reconcileID": "13da9cfe-8d2a-49ec-ab1c-b32821733971", "cluster": "some-name", "namespace": "pitr-physical-backup-source-22129"} 2026-02-19T15:28:09.593Z INFO Waiting for statefulsets to be ready before restore {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-1","namespace":"pitr-physical-backup-source-22129"}, "namespace": "pitr-physical-backup-source-22129", "name": "restore-backup-minio-1", "reconcileID": "13da9cfe-8d2a-49ec-ab1c-b32821733971", "ready": false} 2026-02-19T15:28:14.593Z DEBUG checking if restore is allowed {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-1","namespace":"pitr-physical-backup-source-22129"}, "namespace": "pitr-physical-backup-source-22129", "name": "restore-backup-minio-1", "reconcileID": "4e281086-b56e-4b42-848e-c858757bc161", "cluster": "some-name", "namespace": "pitr-physical-backup-source-22129"} 2026-02-19T15:28:14.595Z INFO Waiting for statefulsets to be ready before restore {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-1","namespace":"pitr-physical-backup-source-22129"}, "namespace": "pitr-physical-backup-source-22129", "name": "restore-backup-minio-1", "reconcileID": "4e281086-b56e-4b42-848e-c858757bc161", "ready": false} 2026-02-19T15:28:19.595Z DEBUG checking if restore is allowed {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-1","namespace":"pitr-physical-backup-source-22129"}, "namespace": "pitr-physical-backup-source-22129", "name": "restore-backup-minio-1", 
"reconcileID": "50d0fdf7-dd75-49c2-8c70-8f089a9872a0", "cluster": "some-name", "namespace": "pitr-physical-backup-source-22129"} 2026-02-19T15:28:19.597Z INFO Waiting for statefulsets to be ready before restore {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-1","namespace":"pitr-physical-backup-source-22129"}, "namespace": "pitr-physical-backup-source-22129", "name": "restore-backup-minio-1", "reconcileID": "50d0fdf7-dd75-49c2-8c70-8f089a9872a0", "ready": false} 2026-02-19T15:28:24.598Z DEBUG checking if restore is allowed {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-1","namespace":"pitr-physical-backup-source-22129"}, "namespace": "pitr-physical-backup-source-22129", "name": "restore-backup-minio-1", "reconcileID": "fb7c6b50-de2f-4287-9454-4c52d628cad7", "cluster": "some-name", "namespace": "pitr-physical-backup-source-22129"} 2026-02-19T15:28:24.600Z INFO Waiting for statefulsets to be ready before restore {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-1","namespace":"pitr-physical-backup-source-22129"}, "namespace": "pitr-physical-backup-source-22129", "name": "restore-backup-minio-1", "reconcileID": "fb7c6b50-de2f-4287-9454-4c52d628cad7", "ready": false} 2026-02-19T15:28:29.601Z DEBUG checking if restore is allowed {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-1","namespace":"pitr-physical-backup-source-22129"}, "namespace": "pitr-physical-backup-source-22129", "name": "restore-backup-minio-1", "reconcileID": "ca1debe1-59d2-4a08-be8e-db3bd005f48f", "cluster": "some-name", "namespace": "pitr-physical-backup-source-22129"} 2026-02-19T15:28:29.603Z INFO Waiting for statefulsets to be ready before restore {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-1","namespace":"pitr-physical-backup-source-22129"}, "namespace": "pitr-physical-backup-source-22129", "name": "restore-backup-minio-1", "reconcileID": "ca1debe1-59d2-4a08-be8e-db3bd005f48f", "ready": false} 2026-02-19T15:28:34.604Z DEBUG checking if restore is allowed {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-1","namespace":"pitr-physical-backup-source-22129"}, "namespace": "pitr-physical-backup-source-22129", "name": "restore-backup-minio-1", "reconcileID": "5233396f-1555-4927-abed-bb5a956cbb38", "cluster": "some-name", "namespace": "pitr-physical-backup-source-22129"} 2026-02-19T15:28:34.606Z INFO Waiting for statefulsets to be ready before restore {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-1","namespace":"pitr-physical-backup-source-22129"}, "namespace": "pitr-physical-backup-source-22129", "name": "restore-backup-minio-1", 
"reconcileID": "5233396f-1555-4927-abed-bb5a956cbb38", "ready": false} 2026-02-19T15:28:39.607Z DEBUG checking if restore is allowed {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-1","namespace":"pitr-physical-backup-source-22129"}, "namespace": "pitr-physical-backup-source-22129", "name": "restore-backup-minio-1", "reconcileID": "cdc25577-a582-41d8-9a39-eeaf6dd4b22f", "cluster": "some-name", "namespace": "pitr-physical-backup-source-22129"} 2026-02-19T15:28:39.609Z INFO Waiting for statefulsets to be ready before restore {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-1","namespace":"pitr-physical-backup-source-22129"}, "namespace": "pitr-physical-backup-source-22129", "name": "restore-backup-minio-1", "reconcileID": "cdc25577-a582-41d8-9a39-eeaf6dd4b22f", "ready": false} 2026-02-19T15:28:44.610Z DEBUG checking if restore is allowed {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-1","namespace":"pitr-physical-backup-source-22129"}, "namespace": "pitr-physical-backup-source-22129", "name": "restore-backup-minio-1", "reconcileID": "48e3a15a-279a-4f93-9870-08b7995cae34", "cluster": "some-name", "namespace": "pitr-physical-backup-source-22129"} 2026-02-19T15:28:44.611Z INFO Waiting for statefulsets to be ready before restore {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-1","namespace":"pitr-physical-backup-source-22129"}, "namespace": "pitr-physical-backup-source-22129", "name": "restore-backup-minio-1", "reconcileID": "48e3a15a-279a-4f93-9870-08b7995cae34", "ready": false} + kubectl_bin get psmdb-restore restore-backup-minio-1 -o yaml ++ mktemp + local LAST_OUT=/tmp/tmp.WAxQ94YKYp ++ mktemp + local LAST_ERR=/tmp/tmp.FnWFdsX7LW + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get psmdb-restore restore-backup-minio-1 -o yaml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.WAxQ94YKYp apiVersion: psmdb.percona.com/v1 kind: PerconaServerMongoDBRestore metadata: annotations: kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"psmdb.percona.com/v1","kind":"PerconaServerMongoDBRestore","metadata":{"annotations":{},"name":"restore-backup-minio-1","namespace":"pitr-physical-backup-source-22129"},"spec":{"backupSource":{"destination":"s3://operator-testing/2026-02-19T15:00:40Z","s3":{"bucket":"operator-testing","credentialsSecret":"minio-secret","endpointUrl":"http://minio-service:9000/","insecureSkipTLSVerify":false,"region":"us-east-1"},"type":"physical"},"clusterName":"some-name","pitr":{"date":"2026-02-19 15:03:41","type":"date"}}} creationTimestamp: "2026-02-19T15:05:49Z" generation: 1 name: restore-backup-minio-1 namespace: pitr-physical-backup-source-22129 resourceVersion: "1771513568195551001" uid: ad07355b-4cb8-4c9f-a52a-581586d5d627 spec: backupSource: destination: s3://operator-testing/2026-02-19T15:00:40Z s3: bucket: operator-testing credentialsSecret: minio-secret endpointUrl: http://minio-service:9000/ insecureSkipTLSVerify: false 
region: us-east-1 type: physical clusterName: some-name pitr: date: "2026-02-19 15:03:41" type: date status: pitrTarget: 2026-02-19T15:03:41 state: waiting + cat /tmp/tmp.FnWFdsX7LW + rm /tmp/tmp.WAxQ94YKYp /tmp/tmp.FnWFdsX7LW + return 0 + kubectl_bin describe psmdb-restore restore-backup-minio-1 ++ mktemp + local LAST_OUT=/tmp/tmp.wROgaa7Lg6 ++ mktemp + local LAST_ERR=/tmp/tmp.p4EvxMueSP + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl describe psmdb-restore restore-backup-minio-1 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.wROgaa7Lg6 Name: restore-backup-minio-1 Namespace: pitr-physical-backup-source-22129 Labels: Annotations: API Version: psmdb.percona.com/v1 Kind: PerconaServerMongoDBRestore Metadata: Creation Timestamp: 2026-02-19T15:05:49Z Generation: 1 Resource Version: 1771513568195551001 UID: ad07355b-4cb8-4c9f-a52a-581586d5d627 Spec: Backup Source: Destination: s3://operator-testing/2026-02-19T15:00:40Z s3: Bucket: operator-testing Credentials Secret: minio-secret Endpoint URL: http://minio-service:9000/ Insecure Skip Tls Verify: false Region: us-east-1 Type: physical Cluster Name: some-name Pitr: Date: 2026-02-19 15:03:41 Type: date Status: Pitr Target: 2026-02-19T15:03:41 State: waiting Events: + cat /tmp/tmp.p4EvxMueSP + rm /tmp/tmp.wROgaa7Lg6 /tmp/tmp.p4EvxMueSP + return 0 + echo 'Restore object restore-backup-minio-1 is in waiting state after 20 minutes.' Restore object restore-backup-minio-1 is in waiting state after 20 minutes. + echo something went wrong with operator or kubernetes cluster something went wrong with operator or kubernetes cluster + exit 1
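
Editor's note: the restore never left the "waiting" state. Every reconcile from 15:25 to 15:28 logged "Waiting for statefulsets to be ready before restore" with "ready": false, so the operator never started the physical PITR restore and the test gave up after its 20-minute budget. A minimal triage sketch along these lines can show what is holding the restore back; the namespace, restore name, and the .status.state field are taken from this log, while the operator Deployment name and namespace in the last command are assumptions and may need adjusting to the actual deployment:

    # Hypothetical triage commands, not part of the e2e test suite itself.
    ns=pitr-physical-backup-source-22129

    # Which statefulsets/pods has the operator not seen become ready yet?
    kubectl -n "$ns" get sts -o wide
    kubectl -n "$ns" get pods -o wide

    # Current phase of the restore object (this log shows it stuck at "waiting").
    kubectl -n "$ns" get psmdb-restore restore-backup-minio-1 \
      -o jsonpath='{.status.state}{"\n"}'

    # Recent operator decisions for this restore (Deployment name/namespace assumed).
    kubectl -n psmdb-operator logs deploy/percona-server-mongodb-operator --tail=200 \
      | grep restore-backup-minio-1

If `get sts` shows a replica set, config server, or mongos statefulset with fewer ready replicas than desired, that is the readiness gate the "Waiting for statefulsets to be ready before restore" messages refer to.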
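
Editor's note: the repeated `++ mktemp` / `seq 0 2` / `cat` / `rm` pattern around every kubectl call in this log comes from the test suite's `kubectl_bin` retry wrapper. The sketch below is a re-construction inferred only from the trace visible in this log (temp files for stdout/stderr, up to three attempts, break on success, then cat and clean up); the real helper in the repository's e2e-tests functions may differ in details such as the extra flag tested in the `'[' 0 '!=' 0 -a -n 1 ']'` condition, which is simplified here to a plain retry-on-failure:

    # Sketch of a kubectl retry wrapper matching the trace pattern in this log.
    kubectl_bin() {
        local LAST_OUT LAST_ERR exit_status=0 timeout=4
        LAST_OUT=$(mktemp)
        LAST_ERR=$(mktemp)
        for i in $(seq 0 2); do
            set +e
            kubectl "$@" >"$LAST_OUT" 2>"$LAST_ERR"
            exit_status=$?
            set -e
            # The real helper also tests an extra flag here; this sketch simply
            # retries while the command keeps failing.
            if [ "$exit_status" != 0 ]; then
                sleep "$timeout"
                continue
            fi
            break
        done
        cat "$LAST_OUT"
        cat "$LAST_ERR" >&2
        rm "$LAST_OUT" "$LAST_ERR"
        return "$exit_status"
    }

Every kubectl invocation in this log (delete, wait, get, describe, patch) goes through this kind of wrapper, which is why each command is surrounded by two mktemp calls and followed by cat and rm of the temporary files.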