Log: /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/logs/pitr-physical.log
WARNING: version difference between client (1.33) and server (1.30) exceeds the supported minor version skew of +/-1
WARNING: version difference between client (1.33) and server (1.30) exceeds the supported minor version skew of +/-1
WARNING: version difference between client (1.33) and server (1.30) exceeds the supported minor version skew of +/-1
+ main
+ create_infra pitr-physical-5149
+ local ns=pitr-physical-5149
+ [[ 1 == 1 ]]
+ delete_crd
+ desc 'get and delete old CRDs and RBAC'
+ set +o xtrace
-----------------------------------------------------------------------------------
get and delete old CRDs and RBAC
-----------------------------------------------------------------------------------
+ kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/deploy/crd.yaml --ignore-not-found --wait=false
++ mktemp
+ local LAST_OUT=/tmp/tmp.dKVHcte22t
++ mktemp
+ local LAST_ERR=/tmp/tmp.iSUwhw6Ybn
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/deploy/crd.yaml --ignore-not-found --wait=false
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.dKVHcte22t
customresourcedefinition.apiextensions.k8s.io "perconaservermongodbbackups.psmdb.percona.com" deleted
customresourcedefinition.apiextensions.k8s.io "perconaservermongodbrestores.psmdb.percona.com" deleted
customresourcedefinition.apiextensions.k8s.io "perconaservermongodbs.psmdb.percona.com" deleted
+ cat /tmp/tmp.iSUwhw6Ybn
+ rm /tmp/tmp.dKVHcte22t /tmp/tmp.iSUwhw6Ybn
+ return 0
++ yq eval .metadata.name /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/deploy/crd.yaml
++ grep -v '\-\-\-'
+ for crd_name in '$(yq eval '\''.metadata.name'\'' "${src_dir}/deploy/crd.yaml" | grep -v '\''\-\-\-'\'')'
+ xargs -L 1 sh -xc 'kubectl patch perconaservermongodbbackups.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"'
+ grep -v NAMESPACE
+ kubectl get perconaservermongodbbackups.psmdb.percona.com --all-namespaces -o wide
+ kubectl patch perconaservermongodbbackups.psmdb.percona.com -n pitr-physical-2225 backup-minio-2 --type=merge -p '{"metadata":{"finalizers":[]}}'
perconaservermongodbbackup.psmdb.percona.com/backup-minio-2 patched
+ kubectl patch perconaservermongodbbackups.psmdb.percona.com -n pitr-physical-2225 backup-minio-3 --type=merge -p '{"metadata":{"finalizers":[]}}'
perconaservermongodbbackup.psmdb.percona.com/backup-minio-3 patched
+ kubectl_bin wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com
++ mktemp
+ local LAST_OUT=/tmp/tmp.wsMMyex8gY
++ mktemp
+ local LAST_ERR=/tmp/tmp.yQM7c2m7uS
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.wsMMyex8gY
customresourcedefinition.apiextensions.k8s.io/perconaservermongodbbackups.psmdb.percona.com condition met
+ cat /tmp/tmp.yQM7c2m7uS
+ rm /tmp/tmp.wsMMyex8gY /tmp/tmp.yQM7c2m7uS
+ return 0
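Note on the step above: delete_crd removes the CRDs with --wait=false, then strips metadata.finalizers from every leftover PerconaServerMongoDBBackup object before waiting for the CRD to disappear; with the operator already gone, nothing would ever process those finalizers and the deletion would hang. A condensed, standalone sketch of the same pattern (the CRD name is the one from the trace; any namespaced CRD works the same way):

    # Strip finalizers from all objects of a CRD so the CRD itself can be
    # deleted even when the controller that would handle them is gone.
    crd=perconaservermongodbbackups.psmdb.percona.com
    kubectl get "$crd" --all-namespaces -o wide \
        | grep -v NAMESPACE \
        | xargs -L 1 sh -xc 'kubectl patch '"$crd"' -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"'
    kubectl wait --for=delete crd "$crd"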
+ for crd_name in '$(yq eval '\''.metadata.name'\'' "${src_dir}/deploy/crd.yaml" | grep -v '\''\-\-\-'\'')'
+ xargs -L 1 sh -xc 'kubectl patch perconaservermongodbrestores.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"'
+ grep -v NAMESPACE
+ kubectl get perconaservermongodbrestores.psmdb.percona.com --all-namespaces -o wide
error: the server doesn't have a resource type "perconaservermongodbrestores"
+ kubectl patch perconaservermongodbrestores.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}'
error: the server doesn't have a resource type "perconaservermongodbrestores"
+ :
+ kubectl_bin wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com
++ mktemp
+ local LAST_OUT=/tmp/tmp.oxQkSy9PRD
++ mktemp
+ local LAST_ERR=/tmp/tmp.eOyB8sZKVK
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.oxQkSy9PRD
+ cat /tmp/tmp.eOyB8sZKVK
+ rm /tmp/tmp.oxQkSy9PRD /tmp/tmp.eOyB8sZKVK
+ return 0
+ for crd_name in '$(yq eval '\''.metadata.name'\'' "${src_dir}/deploy/crd.yaml" | grep -v '\''\-\-\-'\'')'
+ grep -v NAMESPACE
+ xargs -L 1 sh -xc 'kubectl patch perconaservermongodbs.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"'
+ kubectl get perconaservermongodbs.psmdb.percona.com --all-namespaces -o wide
error: the server doesn't have a resource type "perconaservermongodbs"
+ kubectl patch perconaservermongodbs.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}'
error: the server doesn't have a resource type "perconaservermongodbs"
+ :
+ kubectl_bin wait --for=delete crd perconaservermongodbs.psmdb.percona.com
++ mktemp
+ local LAST_OUT=/tmp/tmp.WunkyPJUNi
++ mktemp
+ local LAST_ERR=/tmp/tmp.gWxbfs0wc3
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl wait --for=delete crd perconaservermongodbs.psmdb.percona.com
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.WunkyPJUNi
+ cat /tmp/tmp.gWxbfs0wc3
+ rm /tmp/tmp.WunkyPJUNi /tmp/tmp.gWxbfs0wc3
+ return 0
+ local rbac_yaml=rbac.yaml
+ '[' -n psmdb-operator ']'
+ rbac_yaml=cw-rbac.yaml
+ kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/deploy/cw-rbac.yaml --ignore-not-found
++ mktemp
+ local LAST_OUT=/tmp/tmp.Zr49flF4ME
++ mktemp
+ local LAST_ERR=/tmp/tmp.qDQeOcSRao
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/deploy/cw-rbac.yaml --ignore-not-found
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.Zr49flF4ME
clusterrole.rbac.authorization.k8s.io "percona-server-mongodb-operator" deleted
clusterrolebinding.rbac.authorization.k8s.io "service-account-percona-server-mongodb-operator" deleted
+ cat /tmp/tmp.qDQeOcSRao
+ rm /tmp/tmp.Zr49flF4ME /tmp/tmp.qDQeOcSRao
+ return 0
+ check_crd_for_deletion PR-1961-db56ee7c
+ local git_tag=PR-1961-db56ee7c
++ curl -s https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/PR-1961-db56ee7c/deploy/crd.yaml
++ yq eval .metadata.name
++ /usr/bin/sed ':a;N;$!ba;s/\n/ /g'
++ /usr/bin/sed s/---//g
+ for crd_name in '$(curl -s https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/${git_tag}/deploy/crd.yaml | yq eval '\''.metadata.name'\'' | $sed '\''s/---//g'\'' | $sed '\'':a;N;$!ba;s/\n/ /g'\'')'
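Two details in the trace above are worth calling out. First, by the time the loop reaches the restores and cluster CRDs, the earlier kubectl delete -f deploy/crd.yaml has already removed them, so kubectl fails with "the server doesn't have a resource type"; the lone "+ :" lines are the harness discarding that expected failure with a no-op so that set -e does not abort cleanup. Second, the odd "-n sh" argument appears because xargs got no input lines, leaving $0 inside the sh -c script at its default value of "sh". A minimal sketch of the tolerant-cleanup idiom:

    set -e
    # Cleanup must be idempotent: tolerate resource types that no longer
    # exist. The trailing ':' no-op keeps 'set -e' from aborting on an
    # expected failure.
    kubectl get perconaservermongodbrestores.psmdb.percona.com --all-namespaces || :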
++ kubectl_bin get crd/null -o 'jsonpath={.status.conditions[-1].type}'
+++ mktemp
++ local LAST_OUT=/tmp/tmp.DiAKpkStSW
+++ mktemp
++ local LAST_ERR=/tmp/tmp.tMcIcljk0h
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in '$(seq 0 2)'
++ set +e
++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}'
++ exit_status=1
++ set -e
++ '[' 1 '!=' 0 -a -n 1 ']'
++ cat /tmp/tmp.DiAKpkStSW
++ cat /tmp/tmp.tMcIcljk0h
Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found
++ sleep 0
++ for i in '$(seq 0 2)'
++ set +e
++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}'
++ exit_status=1
++ set -e
++ '[' 1 '!=' 0 -a -n 1 ']'
++ cat /tmp/tmp.DiAKpkStSW
++ cat /tmp/tmp.tMcIcljk0h
Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found
++ sleep 4
++ for i in '$(seq 0 2)'
++ set +e
++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}'
++ exit_status=1
++ set -e
++ '[' 1 '!=' 0 -a -n 1 ']'
++ cat /tmp/tmp.DiAKpkStSW
++ cat /tmp/tmp.tMcIcljk0h
Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found
++ sleep 8
++ cat /tmp/tmp.DiAKpkStSW
++ cat /tmp/tmp.tMcIcljk0h
Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found
++ rm /tmp/tmp.DiAKpkStSW /tmp/tmp.tMcIcljk0h
++ return 1
+ [[ '' == \T\e\r\m\i\n\a\t\i\n\g ]]
+ '[' -n psmdb-operator ']'
+ create_namespace psmdb-operator
+ local namespace=psmdb-operator
+ local skip_clean_namespace=
+ [[ 1 == 1 ]]
+ [[ -z '' ]]
+ destroy_chaos_mesh
++ helm list --all-namespaces --filter chaos-mesh
++ tail -n1
++ sed s/NAMESPACE//
++ awk '-F ' '{print $2}'
+ local chaos_mesh_ns=
+ desc 'destroy chaos-mesh'
+ set +o xtrace
-----------------------------------------------------------------------------------
destroy chaos-mesh
-----------------------------------------------------------------------------------
+ '[' -n '' ']'
++ kubectl get MutatingWebhookConfiguration
++ grep chaos-mesh
++ awk '{print $1}'
+ timeout 30 kubectl delete MutatingWebhookConfiguration
error: resource(s) were provided, but no name was specified
+ :
++ kubectl get ValidatingWebhookConfiguration
++ awk '{print $1}'
++ grep chaos-mesh
+ timeout 30 kubectl delete ValidatingWebhookConfiguration
error: resource(s) were provided, but no name was specified
+ :
++ awk '{print $1}'
++ kubectl get ValidatingWebhookConfiguration
++ grep validate-auth
+ timeout 30 kubectl delete ValidatingWebhookConfiguration
error: resource(s) were provided, but no name was specified
+ :
++ grep chaos-mesh
++ awk '{print $1}'
++ kubectl api-resources
++ awk '{print $1}'
++ grep chaos-mesh.org
++ kubectl get crd
+ timeout 30 kubectl delete crd
error: resource(s) were provided, but no name was specified
+ :
++ awk '{print $1}'
++ grep chaos-mesh
++ kubectl get clusterrolebinding
+ timeout 30 kubectl delete clusterrolebinding
error: resource(s) were provided, but no name was specified
+ :
++ grep chaos-mesh
++ kubectl get clusterrole
++ awk '{print $1}'
+ timeout 30 kubectl delete clusterrole
error: resource(s) were provided, but no name was specified
+ :
+ desc 'cleaned up all old namespaces'
+ set +o xtrace
-----------------------------------------------------------------------------------
cleaned up all old namespaces
-----------------------------------------------------------------------------------
+ '[' -n '' ']'
+ desc 'cleaned up old namespaces psmdb-operator'
+ set +o xtrace
-----------------------------------------------------------------------------------
cleaned up old namespaces psmdb-operator
-----------------------------------------------------------------------------------
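The sleep 0 / sleep 4 / sleep 8 triple above is the harness's kubectl_bin wrapper giving up after three attempts: it captures stdout and stderr into mktemp files, retries on a non-zero exit with growing backoff, and finally returns the last status. A minimal reconstruction, assuming the real helper looks roughly like this (it is not shown verbatim in the trace):

    kubectl_bin() {
        local LAST_OUT LAST_ERR exit_status=0 timeout=4
        LAST_OUT=$(mktemp)
        LAST_ERR=$(mktemp)
        for i in $(seq 0 2); do
            set +e
            kubectl "$@" >"$LAST_OUT" 2>"$LAST_ERR"
            exit_status=$?
            set -e
            # Stop retrying on success; otherwise back off 0s, 4s, 8s.
            [ "$exit_status" -eq 0 ] && break
            sleep $((i * timeout))
        done
        cat "$LAST_OUT"
        cat "$LAST_ERR" >&2
        rm "$LAST_OUT" "$LAST_ERR"
        return "$exit_status"
    }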
+ kubectl_bin delete namespace psmdb-operator --ignore-not-found
++ mktemp
+ awk '{print$1}'
+ local LAST_OUT=/tmp/tmp.Tdzdq7lRYI
+ xargs kubectl delete ns
+ kubectl_bin get ns
+ egrep -v '^kube-|^default|Terminating|psmdb-operator|openshift|gke-mcs|^NAME'
++ mktemp
+ local LAST_OUT=/tmp/tmp.pGsPd2fiw4
++ mktemp
+ local LAST_ERR=/tmp/tmp.1485NepyIF
+ local exit_status=0
+ local timeout=4
++ mktemp
+ local LAST_ERR=/tmp/tmp.hneEVyG341
+ local exit_status=0
+ local timeout=4
++ seq 0 2
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl get ns
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl delete namespace psmdb-operator --ignore-not-found
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.pGsPd2fiw4
+ cat /tmp/tmp.hneEVyG341
+ rm /tmp/tmp.pGsPd2fiw4 /tmp/tmp.hneEVyG341
+ return 0
namespace "gke-managed-cim" deleted
namespace "gke-managed-system" deleted
namespace "gmp-public" deleted
namespace "gmp-system" deleted
namespace "pitr-physical-2225" deleted
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.Tdzdq7lRYI
namespace "psmdb-operator" deleted
+ cat /tmp/tmp.1485NepyIF
+ rm /tmp/tmp.Tdzdq7lRYI /tmp/tmp.1485NepyIF
+ return 0
+ kubectl_bin wait --for=delete namespace psmdb-operator
++ mktemp
+ local LAST_OUT=/tmp/tmp.N5q1A9S8Uj
++ mktemp
+ local LAST_ERR=/tmp/tmp.THCrbNqjMC
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl wait --for=delete namespace psmdb-operator
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.N5q1A9S8Uj
+ cat /tmp/tmp.THCrbNqjMC
+ rm /tmp/tmp.N5q1A9S8Uj /tmp/tmp.THCrbNqjMC
+ return 0
+ desc 'create namespace psmdb-operator'
+ set +o xtrace
-----------------------------------------------------------------------------------
create namespace psmdb-operator
-----------------------------------------------------------------------------------
+ kubectl_bin create namespace psmdb-operator
++ mktemp
+ local LAST_OUT=/tmp/tmp.1TyBVKq2ZT
++ mktemp
+ local LAST_ERR=/tmp/tmp.Ep5icAofvN
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl create namespace psmdb-operator
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.1TyBVKq2ZT
namespace/psmdb-operator created
+ cat /tmp/tmp.Ep5icAofvN
+ rm /tmp/tmp.1TyBVKq2ZT /tmp/tmp.Ep5icAofvN
+ return 0
+ set_kube_ctx psmdb-operator
+ local namespace=psmdb-operator
++ kubectl_bin config current-context
+++ mktemp
++ local LAST_OUT=/tmp/tmp.P11qEX8ivb
+++ mktemp
++ local LAST_ERR=/tmp/tmp.ZIggM2duK4
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in '$(seq 0 2)'
++ set +e
++ kubectl config current-context
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 -a -n 1 ']'
++ break
++ cat /tmp/tmp.P11qEX8ivb
++ cat /tmp/tmp.ZIggM2duK4
++ rm /tmp/tmp.P11qEX8ivb /tmp/tmp.ZIggM2duK4
++ return 0
+ kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-1961-db56ee7c-3-cluster9 --namespace=psmdb-operator
++ mktemp
+ local LAST_OUT=/tmp/tmp.0kixY4FrRU
++ mktemp
+ local LAST_ERR=/tmp/tmp.Y4Xb4SXFO6
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-1961-db56ee7c-3-cluster9 --namespace=psmdb-operator
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.0kixY4FrRU
Context "gke_cloud-dev-112233_us-central1-a_jen-psmdb-1961-db56ee7c-3-cluster9" modified.
+ cat /tmp/tmp.Y4Xb4SXFO6
+ rm /tmp/tmp.0kixY4FrRU /tmp/tmp.Y4Xb4SXFO6
+ return 0
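The interleaved traces above are two cleanups running concurrently: an explicit delete of the operator namespace, and a background sweep that removes every non-system namespace left by earlier runs (which is what takes out pitr-physical-2225 and the GKE add-on namespaces). The sweep, isolated from the trace:

    # Sweep all namespaces except system ones and the operator's own.
    kubectl get ns \
        | egrep -v '^kube-|^default|Terminating|psmdb-operator|openshift|gke-mcs|^NAME' \
        | awk '{print $1}' \
        | xargs kubectl delete ns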
+ deploy_operator
+ desc 'start PSMDB operator'
+ set +o xtrace
-----------------------------------------------------------------------------------
start PSMDB operator
-----------------------------------------------------------------------------------
+ local cr_file
+ '[' -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/pitr-physical/conf/crd.yaml ']'
+ cr_file=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/deploy/crd.yaml
+ kubectl_bin apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/deploy/crd.yaml
++ mktemp
+ local LAST_OUT=/tmp/tmp.YinBvA1Ka7
++ mktemp
+ local LAST_ERR=/tmp/tmp.lRLxaJ9rdo
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/deploy/crd.yaml
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.YinBvA1Ka7
customresourcedefinition.apiextensions.k8s.io/perconaservermongodbbackups.psmdb.percona.com serverside-applied
customresourcedefinition.apiextensions.k8s.io/perconaservermongodbrestores.psmdb.percona.com serverside-applied
customresourcedefinition.apiextensions.k8s.io/perconaservermongodbs.psmdb.percona.com serverside-applied
+ cat /tmp/tmp.lRLxaJ9rdo
+ rm /tmp/tmp.YinBvA1Ka7 /tmp/tmp.lRLxaJ9rdo
+ return 0
+ '[' -n psmdb-operator ']'
+ apply_rbac cw-rbac
+ local operator_namespace=psmdb-operator
+ local rbac=cw-rbac
+ sed -e 's^namespace: .*^namespace: psmdb-operator^'
+ cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/deploy/cw-rbac.yaml
+ kubectl_bin apply -n psmdb-operator -f -
++ mktemp
+ local LAST_OUT=/tmp/tmp.ftcgcDPo8q
++ mktemp
+ local LAST_ERR=/tmp/tmp.UnN5CaudtU
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl apply -n psmdb-operator -f -
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.ftcgcDPo8q
clusterrole.rbac.authorization.k8s.io/percona-server-mongodb-operator created
serviceaccount/percona-server-mongodb-operator created
clusterrolebinding.rbac.authorization.k8s.io/service-account-percona-server-mongodb-operator created
+ cat /tmp/tmp.UnN5CaudtU
+ rm /tmp/tmp.ftcgcDPo8q /tmp/tmp.UnN5CaudtU
+ return 0
| select(.[] == "LOG_LEVEL")) |= .value="DEBUG")' /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/deploy/cw-operator.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.wHIWzvj89y ++ mktemp + local LAST_ERR=/tmp/tmp.bxNyMtNOxm + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.wHIWzvj89y deployment.apps/percona-server-mongodb-operator created + cat /tmp/tmp.bxNyMtNOxm + rm /tmp/tmp.wHIWzvj89y /tmp/tmp.bxNyMtNOxm + return 0 + sleep 2 ++ get_operator_pod ++ kubectl_bin get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.9CzzMw037r +++ mktemp ++ local LAST_ERR=/tmp/tmp.VVyQMVrtKQ ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.9CzzMw037r ++ cat /tmp/tmp.VVyQMVrtKQ ++ rm /tmp/tmp.9CzzMw037r /tmp/tmp.VVyQMVrtKQ ++ return 0 + wait_pod percona-server-mongodb-operator-d49c7dc6d-mzznv + local pod=percona-server-mongodb-operator-d49c7dc6d-mzznv + set +o xtrace waiting for pod/percona-server-mongodb-operator-d49c7dc6d-mzznv to be ready.OK + echo 'Print operator info from log' Print operator info from log + grep 'Manager starting up' ++ get_operator_pod ++ kubectl_bin get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.oKqGOvSDKD +++ mktemp ++ local LAST_ERR=/tmp/tmp.OvcQD4BneY ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.oKqGOvSDKD ++ cat /tmp/tmp.OvcQD4BneY ++ rm /tmp/tmp.oKqGOvSDKD /tmp/tmp.OvcQD4BneY ++ return 0 + kubectl_bin logs percona-server-mongodb-operator-d49c7dc6d-mzznv ++ mktemp + local LAST_OUT=/tmp/tmp.8Db4Od0PPG ++ mktemp + local LAST_ERR=/tmp/tmp.OUU9aOGtwL + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl logs percona-server-mongodb-operator-d49c7dc6d-mzznv + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.8Db4Od0PPG + cat /tmp/tmp.OUU9aOGtwL + rm /tmp/tmp.8Db4Od0PPG /tmp/tmp.OUU9aOGtwL + return 0 2025-06-06T08:18:35.330Z INFO setup Manager starting up {"gitCommit": "db56ee7c993f5ad2d1ada9b0fbbd4e3142f61963", "gitBranch": "PR-1961-db56ee7c", "buildTime": "", "goVersion": "go1.24.4", "os": "linux", "arch": "amd64"} + create_namespace pitr-physical-5149 + local namespace=pitr-physical-5149 + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + destroy_chaos_mesh ++ tail -n1 ++ sed s/NAMESPACE// ++ helm list --all-namespaces --filter chaos-mesh ++ awk '-F ' '{print $2}' + local chaos_mesh_ns= + desc 'destroy chaos-mesh' + set +o xtrace ----------------------------------------------------------------------------------- destroy chaos-mesh ----------------------------------------------------------------------------------- + '[' -n '' ']' ++ kubectl get MutatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete MutatingWebhookConfiguration error: resource(s) were provided, but no 
+ create_namespace pitr-physical-5149
+ local namespace=pitr-physical-5149
+ local skip_clean_namespace=
+ [[ 1 == 1 ]]
+ [[ -z '' ]]
+ destroy_chaos_mesh
++ tail -n1
++ sed s/NAMESPACE//
++ helm list --all-namespaces --filter chaos-mesh
++ awk '-F ' '{print $2}'
+ local chaos_mesh_ns=
+ desc 'destroy chaos-mesh'
+ set +o xtrace
-----------------------------------------------------------------------------------
destroy chaos-mesh
-----------------------------------------------------------------------------------
+ '[' -n '' ']'
++ kubectl get MutatingWebhookConfiguration
++ grep chaos-mesh
++ awk '{print $1}'
+ timeout 30 kubectl delete MutatingWebhookConfiguration
error: resource(s) were provided, but no name was specified
+ :
++ awk '{print $1}'
++ kubectl get ValidatingWebhookConfiguration
++ grep chaos-mesh
+ timeout 30 kubectl delete ValidatingWebhookConfiguration
error: resource(s) were provided, but no name was specified
+ :
++ grep validate-auth
++ awk '{print $1}'
++ kubectl get ValidatingWebhookConfiguration
+ timeout 30 kubectl delete ValidatingWebhookConfiguration
error: resource(s) were provided, but no name was specified
+ :
++ awk '{print $1}'
++ grep chaos-mesh
++ kubectl api-resources
++ grep chaos-mesh.org
++ awk '{print $1}'
++ kubectl get crd
+ timeout 30 kubectl delete crd
error: resource(s) were provided, but no name was specified
+ :
++ kubectl get clusterrolebinding
++ grep chaos-mesh
++ awk '{print $1}'
+ timeout 30 kubectl delete clusterrolebinding
error: resource(s) were provided, but no name was specified
+ :
++ grep chaos-mesh
++ kubectl get clusterrole
++ awk '{print $1}'
+ timeout 30 kubectl delete clusterrole
error: resource(s) were provided, but no name was specified
+ :
+ desc 'cleaned up all old namespaces'
+ set +o xtrace
-----------------------------------------------------------------------------------
cleaned up all old namespaces
-----------------------------------------------------------------------------------
+ '[' -n '' ']'
+ desc 'cleaned up old namespaces pitr-physical-5149'
+ set +o xtrace
-----------------------------------------------------------------------------------
cleaned up old namespaces pitr-physical-5149
-----------------------------------------------------------------------------------
+ kubectl_bin delete namespace pitr-physical-5149 --ignore-not-found
+ awk '{print$1}'
+ xargs kubectl delete ns
++ mktemp
+ egrep -v '^kube-|^default|Terminating|psmdb-operator|openshift|gke-mcs|^NAME'
+ local LAST_OUT=/tmp/tmp.3eDraXpNIH
+ kubectl_bin get ns
++ mktemp
+ local LAST_OUT=/tmp/tmp.itOtTBBx6w
++ mktemp
+ local LAST_ERR=/tmp/tmp.iHCspj9Gg3
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl get ns
++ mktemp
+ local LAST_ERR=/tmp/tmp.smScgw1Ety
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl delete namespace pitr-physical-5149 --ignore-not-found
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.itOtTBBx6w
+ cat /tmp/tmp.iHCspj9Gg3
+ rm /tmp/tmp.itOtTBBx6w /tmp/tmp.iHCspj9Gg3
+ return 0
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.3eDraXpNIH
+ cat /tmp/tmp.smScgw1Ety
+ rm /tmp/tmp.3eDraXpNIH /tmp/tmp.smScgw1Ety
+ return 0
+ kubectl_bin wait --for=delete namespace pitr-physical-5149
++ mktemp
+ local LAST_OUT=/tmp/tmp.5spQURy5Z9
++ mktemp
+ local LAST_ERR=/tmp/tmp.8aM7XP762s
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl wait --for=delete namespace pitr-physical-5149
namespace "gke-managed-cim" deleted
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.5spQURy5Z9
+ cat /tmp/tmp.8aM7XP762s
+ rm /tmp/tmp.5spQURy5Z9 /tmp/tmp.8aM7XP762s
+ return 0
+ desc 'create namespace pitr-physical-5149'
+ set +o xtrace
-----------------------------------------------------------------------------------
create namespace pitr-physical-5149
-----------------------------------------------------------------------------------
+ kubectl_bin create namespace pitr-physical-5149
++ mktemp
+ local LAST_OUT=/tmp/tmp.GefvPzlXYP
++ mktemp
+ local LAST_ERR=/tmp/tmp.cXo0iAtZb5
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl create namespace pitr-physical-5149
namespace "gke-managed-system" deleted
namespace "gmp-public" deleted
namespace "gmp-system" deleted
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.GefvPzlXYP
namespace/pitr-physical-5149 created
+ cat /tmp/tmp.cXo0iAtZb5
+ rm /tmp/tmp.GefvPzlXYP /tmp/tmp.cXo0iAtZb5
+ return 0
+ set_kube_ctx pitr-physical-5149
+ local namespace=pitr-physical-5149
++ kubectl_bin config current-context
+++ mktemp
++ local LAST_OUT=/tmp/tmp.tiOgyLxv5c
+++ mktemp
++ local LAST_ERR=/tmp/tmp.9OjAewJqCp
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in '$(seq 0 2)'
++ set +e
++ kubectl config current-context
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 -a -n 1 ']'
++ break
++ cat /tmp/tmp.tiOgyLxv5c
++ cat /tmp/tmp.9OjAewJqCp
++ rm /tmp/tmp.tiOgyLxv5c /tmp/tmp.9OjAewJqCp
++ return 0
+ kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-1961-db56ee7c-3-cluster9 --namespace=pitr-physical-5149
++ mktemp
+ local LAST_OUT=/tmp/tmp.3RGx7HEvix
++ mktemp
+ local LAST_ERR=/tmp/tmp.yl0fXHtSYi
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-1961-db56ee7c-3-cluster9 --namespace=pitr-physical-5149
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.3RGx7HEvix
Context "gke_cloud-dev-112233_us-central1-a_jen-psmdb-1961-db56ee7c-3-cluster9" modified.
+ cat /tmp/tmp.yl0fXHtSYi
+ rm /tmp/tmp.3RGx7HEvix /tmp/tmp.yl0fXHtSYi
+ return 0
+ deploy_minio
+ desc 'install Minio'
+ set +o xtrace
-----------------------------------------------------------------------------------
install Minio
-----------------------------------------------------------------------------------
+ helm uninstall minio-service
Error: uninstall: Release not loaded: minio-service: release: not found
+ :
+ helm repo remove minio
"minio" has been removed from your repositories
+ helm repo add minio https://charts.min.io/
"minio" has been added to your repositories
+ retry 10 60 helm install minio-service --version 5.4.0 --set replicas=1 --set mode=standalone --set resources.requests.memory=256Mi --set rootUser=rootuser --set rootPassword=rootpass123 --set 'users[0].accessKey=some-access-key' --set 'users[0].secretKey=some-secret-key' --set 'users[0].policy=consoleAdmin' --set service.type=ClusterIP --set configPathmc=/tmp/.minio/ --set persistence.size=2G --set securityContext.enabled=false minio/minio
+ local max=10
+ local delay=60
+ shift 2
+ local n=1
+ helm install minio-service --version 5.4.0 --set replicas=1 --set mode=standalone --set resources.requests.memory=256Mi --set rootUser=rootuser --set rootPassword=rootpass123 --set 'users[0].accessKey=some-access-key' --set 'users[0].secretKey=some-secret-key' --set 'users[0].policy=consoleAdmin' --set service.type=ClusterIP --set configPathmc=/tmp/.minio/ --set persistence.size=2G --set securityContext.enabled=false minio/minio
NAME: minio-service
LAST DEPLOYED: Fri Jun 6 08:19:15 2025
NAMESPACE: pitr-physical-5149
STATUS: deployed
REVISION: 1
TEST SUITE: None
NOTES:
MinIO can be accessed via port 9000 on the following DNS name from within your cluster:
minio-service.pitr-physical-5149.cluster.local

To access MinIO from localhost, run the below commands:

1. export POD_NAME=$(kubectl get pods --namespace pitr-physical-5149 -l "release=minio-service" -o jsonpath="{.items[0].metadata.name}")
2. kubectl port-forward $POD_NAME 9000 --namespace pitr-physical-5149

Read more about port forwarding here: http://kubernetes.io/docs/user-guide/kubectl/kubectl_port-forward/

You can now access MinIO server on http://localhost:9000. Follow the below steps to connect to MinIO server with mc client:

1. Download the MinIO mc client - https://min.io/docs/minio/linux/reference/minio-mc.html#quickstart
2. export MC_HOST_minio-service-local=http://$(kubectl get secret --namespace pitr-physical-5149 minio-service -o jsonpath="{.data.rootUser}" | base64 --decode):$(kubectl get secret --namespace pitr-physical-5149 minio-service -o jsonpath="{.data.rootPassword}" | base64 --decode)@localhost:9000
3. mc ls minio-service-local
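deploy_minio tolerates stale helm state (the failed helm uninstall is discarded with the usual "+ :" no-op) and wraps the install in a "retry <max> <delay> cmd..." helper; the "local max=10 / local delay=60 / local n=1" lines above are that helper expanding. A plausible reconstruction of it (the function body is not shown verbatim in the trace):

    retry() {
        local max=$1 delay=$2
        shift 2          # remaining args are the command to run
        local n=1
        until "$@"; do
            if [ "$n" -ge "$max" ]; then
                echo "retry: giving up after $n attempts: $*" >&2
                return 1
            fi
            sleep "$delay"
            n=$((n + 1))
        done
    }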
++ kubectl_bin get pods --selector=release=minio-service -o 'jsonpath={.items[].metadata.name}'
+++ mktemp
++ local LAST_OUT=/tmp/tmp.09qp6nkOBJ
+++ mktemp
++ local LAST_ERR=/tmp/tmp.p9xYJUXMM2
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in '$(seq 0 2)'
++ set +e
++ kubectl get pods --selector=release=minio-service -o 'jsonpath={.items[].metadata.name}'
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 -a -n 1 ']'
++ break
++ cat /tmp/tmp.09qp6nkOBJ
++ cat /tmp/tmp.p9xYJUXMM2
++ rm /tmp/tmp.09qp6nkOBJ /tmp/tmp.p9xYJUXMM2
++ return 0
+ MINIO_POD=minio-service-86dfccd949-7f7gf
+ wait_pod minio-service-86dfccd949-7f7gf
+ local pod=minio-service-86dfccd949-7f7gf
+ set +o xtrace
waiting for pod/minio-service-86dfccd949-7f7gf to be ready.OK
+ '[' -n psmdb-operator ']'
+ kubectl_bin create svc -n psmdb-operator externalname minio-service --external-name=minio-service.pitr-physical-5149.svc.cluster.local --tcp=9000
++ mktemp
+ local LAST_OUT=/tmp/tmp.uk4F4IbZtd
++ mktemp
+ local LAST_ERR=/tmp/tmp.30fsnwFvUK
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl create svc -n psmdb-operator externalname minio-service --external-name=minio-service.pitr-physical-5149.svc.cluster.local --tcp=9000
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.uk4F4IbZtd
service/minio-service created
+ cat /tmp/tmp.30fsnwFvUK
+ rm /tmp/tmp.uk4F4IbZtd /tmp/tmp.30fsnwFvUK
+ return 0
+ create_minio_bucket operator-testing
+ local bucket=operator-testing
+ kubectl_bin run -i --rm aws-cli --image=perconalab/awscli --restart=Never -- bash -c 'AWS_ACCESS_KEY_ID=some-access-key AWS_SECRET_ACCESS_KEY=some-secret-key AWS_DEFAULT_REGION=us-east-1 /usr/bin/aws --endpoint-url http://minio-service:9000 s3 mb s3://operator-testing'
++ mktemp
+ local LAST_OUT=/tmp/tmp.FWOidRZpND
++ mktemp
+ local LAST_ERR=/tmp/tmp.UomvrBxRaW
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl run -i --rm aws-cli --image=perconalab/awscli --restart=Never -- bash -c 'AWS_ACCESS_KEY_ID=some-access-key AWS_SECRET_ACCESS_KEY=some-secret-key AWS_DEFAULT_REGION=us-east-1 /usr/bin/aws --endpoint-url http://minio-service:9000 s3 mb s3://operator-testing'
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.FWOidRZpND
make_bucket: operator-testing
pod "aws-cli" deleted
+ cat /tmp/tmp.UomvrBxRaW
+ rm /tmp/tmp.FWOidRZpND /tmp/tmp.UomvrBxRaW
+ return 0
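Two details above: an ExternalName Service is created in the operator namespace so that the name minio-service also resolves from there, and the bucket is created without installing mc locally by running a throwaway aws-cli pod against the in-cluster MinIO endpoint. Standalone form of the bucket step (credentials are the users[0] values passed to the chart):

    kubectl run -i --rm aws-cli --image=perconalab/awscli --restart=Never -- \
        bash -c 'AWS_ACCESS_KEY_ID=some-access-key \
                 AWS_SECRET_ACCESS_KEY=some-secret-key \
                 AWS_DEFAULT_REGION=us-east-1 \
                 /usr/bin/aws --endpoint-url http://minio-service:9000 s3 mb s3://operator-testing'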
+ desc 'create secrets and start client'
+ set +o xtrace
-----------------------------------------------------------------------------------
create secrets and start client
-----------------------------------------------------------------------------------
+ kubectl_bin apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/conf/secrets.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/conf/client.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/conf/minio-secret.yml
++ mktemp
+ local LAST_OUT=/tmp/tmp.JGUbHnggnE
++ mktemp
+ local LAST_ERR=/tmp/tmp.4Y93EbsK12
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/conf/secrets.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/conf/client.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/conf/minio-secret.yml
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.JGUbHnggnE
secret/some-users created
deployment.apps/psmdb-client created
secret/minio-secret created
+ cat /tmp/tmp.4Y93EbsK12
+ rm /tmp/tmp.JGUbHnggnE /tmp/tmp.4Y93EbsK12
+ return 0
+ cluster=some-name
+ desc 'create first PSMDB cluster some-name'
+ set +o xtrace
-----------------------------------------------------------------------------------
create first PSMDB cluster some-name
-----------------------------------------------------------------------------------
+ apply_cluster /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/pitr-physical/conf/some-name-rs0.yml
+ '[' -z '' ']'
+ kubectl_bin apply -f -
+ cat_config /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/pitr-physical/conf/some-name-rs0.yml
+ cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/pitr-physical/conf/some-name-rs0.yml
++ mktemp
+ yq eval '(.spec | select(.image == null)).image = "perconalab/percona-server-mongodb-operator:main-mongod7.0"'
+ yq eval '(.spec | select(has("initImage"))).initImage = "perconalab/percona-server-mongodb-operator:PR-1961-db56ee7c"'
+ local LAST_OUT=/tmp/tmp.2mE7Uvx5FX
+ yq eval '(.spec | select(has("pmm"))).pmm.image = "perconalab/pmm-client:dev-latest"'
++ mktemp
+ yq eval '(.spec | select(has("backup"))).backup.image = "perconalab/percona-server-mongodb-operator:main-backup"'
+ local LAST_ERR=/tmp/tmp.DZhcGgbLt6
+ local exit_status=0
+ local timeout=4
+ yq eval '.spec.upgradeOptions.apply="Never"'
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl apply -f -
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.2mE7Uvx5FX
perconaservermongodb.psmdb.percona.com/some-name created
+ cat /tmp/tmp.DZhcGgbLt6
+ rm /tmp/tmp.2mE7Uvx5FX /tmp/tmp.DZhcGgbLt6
+ return 0
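apply_cluster leaves the CR file untouched on disk; cat_config streams it through a chain of yq edits that fill in the images under test and pin upgradeOptions.apply to Never so the operator does not try to upgrade mid-test. The same chain as an explicit pipeline (paths shortened for readability):

    cat e2e-tests/pitr-physical/conf/some-name-rs0.yml \
      | yq eval '(.spec | select(.image == null)).image = "perconalab/percona-server-mongodb-operator:main-mongod7.0"' \
      | yq eval '(.spec | select(has("initImage"))).initImage = "perconalab/percona-server-mongodb-operator:PR-1961-db56ee7c"' \
      | yq eval '(.spec | select(has("pmm"))).pmm.image = "perconalab/pmm-client:dev-latest"' \
      | yq eval '(.spec | select(has("backup"))).backup.image = "perconalab/percona-server-mongodb-operator:main-backup"' \
      | yq eval '.spec.upgradeOptions.apply="Never"' \
      | kubectl apply -f -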
+ desc 'check if all 3 Pods started'
+ set +o xtrace
-----------------------------------------------------------------------------------
check if all 3 Pods started
-----------------------------------------------------------------------------------
+ wait_for_running some-name-rs0 3
+ local name=some-name-rs0
+ let last_pod=2
+ local check_cluster_readyness=true
+ set_debug
+ [[ 1 == 1 ]]
+ set -o xtrace
+ local rs_name=rs0
+ local cluster_name=some-name
++ seq 0 2
+ for i in '$(seq 0 $last_pod)'
+ [[ 0 -eq 2 ]]
+ wait_pod some-name-rs0-0
+ local pod=some-name-rs0-0
+ set +o xtrace
waiting for pod/some-name-rs0-0 to be ready.........OK
+ for i in '$(seq 0 $last_pod)'
+ [[ 1 -eq 2 ]]
+ wait_pod some-name-rs0-1
+ local pod=some-name-rs0-1
+ set +o xtrace
waiting for pod/some-name-rs0-1 to be ready......OK
+ for i in '$(seq 0 $last_pod)'
+ [[ 2 -eq 2 ]]
++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}'
+++ mktemp
++ local LAST_OUT=/tmp/tmp.sGm1zrfCaF
+++ mktemp
++ local LAST_ERR=/tmp/tmp.FcNqimqM7C
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in '$(seq 0 2)'
++ set +e
++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}'
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 -a -n 1 ']'
++ break
++ cat /tmp/tmp.sGm1zrfCaF
++ cat /tmp/tmp.FcNqimqM7C
++ rm /tmp/tmp.sGm1zrfCaF /tmp/tmp.FcNqimqM7C
++ return 0
+ [[ '' == \t\r\u\e ]]
+ wait_pod some-name-rs0-2
+ local pod=some-name-rs0-2
+ set +o xtrace
waiting for pod/some-name-rs0-2 to be ready......OK
++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}'
+++ mktemp
++ local LAST_OUT=/tmp/tmp.c1xaYAcaEH
+++ mktemp
++ local LAST_ERR=/tmp/tmp.HC6MG4q5ih
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in '$(seq 0 2)'
++ set +e
++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}'
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 -a -n 1 ']'
++ break
++ cat /tmp/tmp.c1xaYAcaEH
++ cat /tmp/tmp.HC6MG4q5ih
++ rm /tmp/tmp.c1xaYAcaEH /tmp/tmp.HC6MG4q5ih
++ return 0
+ [[ '' == \t\r\u\e ]]
++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}'
+++ mktemp
++ local LAST_OUT=/tmp/tmp.A55N4WvBZK
+++ mktemp
++ local LAST_ERR=/tmp/tmp.Zbdd7ZYJUy
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in '$(seq 0 2)'
++ set +e
++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}'
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 -a -n 1 ']'
++ break
++ cat /tmp/tmp.A55N4WvBZK
++ cat /tmp/tmp.Zbdd7ZYJUy
++ rm /tmp/tmp.A55N4WvBZK /tmp/tmp.Zbdd7ZYJUy
++ return 0
+ [[ '' == \t\r\u\e ]]
+ sleep 10
+ [[ true == \t\r\u\e ]]
+ set +x
Waiting for cluster readyness........................................
+ wait_for_running some-name-cfg 3 false
+ local name=some-name-cfg
+ let last_pod=2
+ local check_cluster_readyness=false
+ set_debug
+ [[ 1 == 1 ]]
+ set -o xtrace
+ local rs_name=cfg
+ local cluster_name=some-name
++ seq 0 2
+ for i in '$(seq 0 $last_pod)'
+ [[ 0 -eq 2 ]]
+ wait_pod some-name-cfg-0
+ local pod=some-name-cfg-0
+ set +o xtrace
waiting for pod/some-name-cfg-0 to be ready.OK
+ for i in '$(seq 0 $last_pod)'
+ [[ 1 -eq 2 ]]
+ wait_pod some-name-cfg-1
+ local pod=some-name-cfg-1
+ set +o xtrace
waiting for pod/some-name-cfg-1 to be ready.OK
+ for i in '$(seq 0 $last_pod)'
+ [[ 2 -eq 2 ]]
++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].arbiter.enabled}'
+++ mktemp
++ local LAST_OUT=/tmp/tmp.vNytnO4uGS
+++ mktemp
++ local LAST_ERR=/tmp/tmp.wbCPjptmrj
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in '$(seq 0 2)'
++ set +e
++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].arbiter.enabled}'
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 -a -n 1 ']'
++ break
++ cat /tmp/tmp.vNytnO4uGS
++ cat /tmp/tmp.wbCPjptmrj
++ rm /tmp/tmp.vNytnO4uGS /tmp/tmp.wbCPjptmrj
++ return 0
+ [[ '' == \t\r\u\e ]]
+ wait_pod some-name-cfg-2
+ local pod=some-name-cfg-2
+ set +o xtrace
waiting for pod/some-name-cfg-2 to be ready.OK
++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].non_voting.enabled}'
+++ mktemp
++ local LAST_OUT=/tmp/tmp.kkapcWruYr
+++ mktemp
++ local LAST_ERR=/tmp/tmp.Z49NBM84CW
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in '$(seq 0 2)'
++ set +e
++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].non_voting.enabled}'
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 -a -n 1 ']'
++ break
++ cat /tmp/tmp.kkapcWruYr
++ cat /tmp/tmp.Z49NBM84CW
++ rm /tmp/tmp.kkapcWruYr /tmp/tmp.Z49NBM84CW
++ return 0
+ [[ '' == \t\r\u\e ]]
++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].hidden.enabled}'
+++ mktemp
++ local LAST_OUT=/tmp/tmp.aTT4hyKRVi
+++ mktemp
++ local LAST_ERR=/tmp/tmp.9n04mfqW4L
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in '$(seq 0 2)'
++ set +e
++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].hidden.enabled}'
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 -a -n 1 ']'
++ break
++ cat /tmp/tmp.aTT4hyKRVi
++ cat /tmp/tmp.9n04mfqW4L
++ rm /tmp/tmp.aTT4hyKRVi /tmp/tmp.9n04mfqW4L
++ return 0
+ [[ '' == \t\r\u\e ]]
+ sleep 10
+ [[ false == \t\r\u\e ]]
+ sleep 10
+ desc 'write data'
+ set +o xtrace
-----------------------------------------------------------------------------------
write data
-----------------------------------------------------------------------------------
+ write_initial_data
+ desc 'create user myApp'
+ set +o xtrace
-----------------------------------------------------------------------------------
create user myApp
-----------------------------------------------------------------------------------
+ run_mongos 'db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})' userAdmin:userAdmin123456@some-name-mongos.pitr-physical-5149
+ local 'command=db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})'
+ local uri=userAdmin:userAdmin123456@some-name-mongos.pitr-physical-5149
+ local driver=mongodb
+ local suffix=.svc.cluster.local
+ local mongo_flag=
+ local port=27017
+ local mongo_bin=mongo
++ echo .svc.cluster.local
++ awk -F: '{print $2}'
+ suffix_port=
+ [[ -z '' ]]
+ suffix=.svc.cluster.local:27017
++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}'
+++ mktemp
++ local LAST_OUT=/tmp/tmp.iLP20eamti
+++ mktemp
++ local LAST_ERR=/tmp/tmp.wRQdLM8D7I
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in '$(seq 0 2)'
++ set +e
++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}'
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 -a -n 1 ']'
++ break
++ cat /tmp/tmp.iLP20eamti
++ cat /tmp/tmp.wRQdLM8D7I
++ rm /tmp/tmp.iLP20eamti /tmp/tmp.wRQdLM8D7I
++ return 0
+ local client_container=psmdb-client-66f577db5f-ltmps
+ kubectl_bin exec psmdb-client-66f577db5f-ltmps -- bash -c 'printf '\''db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})\n'\'' | mongo mongodb://userAdmin:userAdmin123456@some-name-mongos.pitr-physical-5149.svc.cluster.local:27017/admin '
++ mktemp
+ local LAST_OUT=/tmp/tmp.nBwR1s8lG0
++ mktemp
+ local LAST_ERR=/tmp/tmp.ItPC0wY0IX
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl exec psmdb-client-66f577db5f-ltmps -- bash -c 'printf '\''db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})\n'\'' | mongo mongodb://userAdmin:userAdmin123456@some-name-mongos.pitr-physical-5149.svc.cluster.local:27017/admin '
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.nBwR1s8lG0
Percona Server for MongoDB shell version v4.4.29-28
connecting to: mongodb://some-name-mongos.pitr-physical-5149.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb
Implicit session: session { "id" : UUID("4dac3abe-1c83-40de-bf9a-4fc83beb8e6e") }
Percona Server for MongoDB server version: v7.0.18-11
WARNING: shell and server versions do not match
Successfully added user: {
  "user" : "myApp",
  "roles" : [
    {
      "db" : "myApp",
      "role" : "readWrite"
    }
  ]
}
bye
+ cat /tmp/tmp.ItPC0wY0IX
+ rm /tmp/tmp.nBwR1s8lG0 /tmp/tmp.ItPC0wY0IX
+ return 0
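Every shell-level interaction with the cluster goes through the psmdb-client deployment created earlier: run_mongos resolves the client pod by label, then pipes the statement into the mongo shell against the mongos service. A condensed sketch of the helper as the trace shows it expanding (quoting simplified; commands containing single quotes would need the harness's full escaping):

    run_mongos() {
        local command=$1 uri=$2
        local pod
        # The client pod is found by label, never by name.
        pod=$(kubectl get pods --selector=name=psmdb-client \
              -o 'jsonpath={.items[].metadata.name}')
        kubectl exec "$pod" -- bash -c \
            "printf '$command\n' | mongo mongodb://$uri.svc.cluster.local:27017/admin"
    }

    run_mongos 'use myApp\n db.test.insert({ x: 100500 })' \
        myApp:myPass@some-name-mongos.pitr-physical-5149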
"roles" : [ { "db" : "myApp", "role" : "readWrite" } ] } bye + cat /tmp/tmp.ItPC0wY0IX + rm /tmp/tmp.nBwR1s8lG0 /tmp/tmp.ItPC0wY0IX + return 0 + sleep 2 + write_document + local cmp_postfix= + local sleep_value=0 + log 'write initial data, read from all' + set +o xtrace [2025-06-06T08:23:36+0000] write initial data, read from all + run_mongos 'use myApp\n db.test.insert({ x: 100500 })' myApp:myPass@some-name-mongos.pitr-physical-5149 + local 'command=use myApp\n db.test.insert({ x: 100500 })' + local uri=myApp:myPass@some-name-mongos.pitr-physical-5149 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local port=27017 + local mongo_bin=mongo ++ echo .svc.cluster.local ++ awk -F: '{print $2}' + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.s5VXrBYe8l +++ mktemp ++ local LAST_ERR=/tmp/tmp.68dltmdNME ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.s5VXrBYe8l ++ cat /tmp/tmp.68dltmdNME ++ rm /tmp/tmp.s5VXrBYe8l /tmp/tmp.68dltmdNME ++ return 0 + local client_container=psmdb-client-66f577db5f-ltmps + kubectl_bin exec psmdb-client-66f577db5f-ltmps -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.pitr-physical-5149.svc.cluster.local:27017/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.td3HuN7WJq ++ mktemp + local LAST_ERR=/tmp/tmp.KywOoGFEyo + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-66f577db5f-ltmps -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.pitr-physical-5149.svc.cluster.local:27017/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.td3HuN7WJq Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-mongos.pitr-physical-5149.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb Implicit session: session { "id" : UUID("ad088c4d-ae6e-4d59-8a0d-b07f079a75b8") } Percona Server for MongoDB server version: v7.0.18-11 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.KywOoGFEyo + rm /tmp/tmp.td3HuN7WJq /tmp/tmp.KywOoGFEyo + return 0 + sleep 0 + compare_mongos_cmd find myApp:myPass@some-name-mongos.pitr-physical-5149 + local command=find + local uri=myApp:myPass@some-name-mongos.pitr-physical-5149 + local postfix= + local suffix= + local database=myApp + local collection=test + local port=27017 + egrep -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' + run_mongos 'use myApp\n db.test.find()' myApp:myPass@some-name-mongos.pitr-physical-5149 mongodb '' '' 27017 + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-mongos.pitr-physical-5149 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local port=27017 + local mongo_bin=mongo ++ awk -F: '{print $2}' ++ echo .svc.cluster.local + suffix_port= + [[ -z 
+ [[ -z '' ]]
+ suffix=.svc.cluster.local:27017
++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}'
+++ mktemp
++ local LAST_OUT=/tmp/tmp.rqVw1ZkDLx
+++ mktemp
++ local LAST_ERR=/tmp/tmp.E5iluR79bz
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in '$(seq 0 2)'
++ set +e
++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}'
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 -a -n 1 ']'
++ break
++ cat /tmp/tmp.rqVw1ZkDLx
++ cat /tmp/tmp.E5iluR79bz
++ rm /tmp/tmp.rqVw1ZkDLx /tmp/tmp.E5iluR79bz
++ return 0
+ local client_container=psmdb-client-66f577db5f-ltmps
+ kubectl_bin exec psmdb-client-66f577db5f-ltmps -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.pitr-physical-5149.svc.cluster.local:27017/admin '
++ mktemp
+ local LAST_OUT=/tmp/tmp.Y0XdVg01DF
++ mktemp
+ local LAST_ERR=/tmp/tmp.bPNU6DOPqk
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl exec psmdb-client-66f577db5f-ltmps -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.pitr-physical-5149.svc.cluster.local:27017/admin '
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.Y0XdVg01DF
+ cat /tmp/tmp.bPNU6DOPqk
+ rm /tmp/tmp.Y0XdVg01DF /tmp/tmp.bPNU6DOPqk
+ return 0
+ [[ 0 -eq 0 ]]
+ diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/pitr-physical/compare/find.json /tmp/tmp.mlcIBzXwjL/find
+ desc 'wait for pbm agents to start'
+ set +o xtrace
-----------------------------------------------------------------------------------
wait for pbm agents to start
-----------------------------------------------------------------------------------
+ wait_backup_agent some-name-rs0-0
+ local agent_pod=some-name-rs0-0
+ set +o xtrace
waiting for pbm-agent to be ready in some-name-rs0-0...2025-06-06T08:23:09.000+0000 I listening for the commands
+ wait_backup_agent some-name-rs0-1
+ local agent_pod=some-name-rs0-1
+ set +o xtrace
waiting for pbm-agent to be ready in some-name-rs0-1...2025-06-06T08:23:07.000+0000 I listening for the commands
+ wait_backup_agent some-name-rs0-2
+ local agent_pod=some-name-rs0-2
+ set +o xtrace
waiting for pbm-agent to be ready in some-name-rs0-2...2025-06-06T08:23:07.000+0000 I listening for the commands
+ wait_backup_agent some-name-rs1-0
+ local agent_pod=some-name-rs1-0
+ set +o xtrace
waiting for pbm-agent to be ready in some-name-rs1-0...2025-06-06T08:23:09.000+0000 I listening for the commands
+ wait_backup_agent some-name-rs1-1
+ local agent_pod=some-name-rs1-1
+ set +o xtrace
waiting for pbm-agent to be ready in some-name-rs1-1...2025-06-06T08:23:11.000+0000 I listening for the commands
+ wait_backup_agent some-name-rs1-2
+ local agent_pod=some-name-rs1-2
+ set +o xtrace
waiting for pbm-agent to be ready in some-name-rs1-2...2025-06-06T08:23:08.000+0000 I listening for the commands
+ wait_backup_agent some-name-rs2-0
+ local agent_pod=some-name-rs2-0
+ set +o xtrace
waiting for pbm-agent to be ready in some-name-rs2-0...2025-06-06T08:23:12.000+0000 I listening for the commands
+ wait_backup_agent some-name-rs2-1
+ local agent_pod=some-name-rs2-1
+ set +o xtrace
waiting for pbm-agent to be ready in some-name-rs2-1...2025-06-06T08:23:08.000+0000 I listening for the commands
+ wait_backup_agent some-name-rs2-2
+ local agent_pod=some-name-rs2-2
+ set +o xtrace
waiting for pbm-agent to be ready in some-name-rs2-2...2025-06-06T08:23:08.000+0000 I listening for the commands
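Before any backup is attempted, the test polls all nine pbm-agent sidecars (three pods in each of rs0, rs1, rs2) until each one logs PBM's "listening for the commands" line, i.e. the agent has registered and can accept backup and restore work. A plausible sketch of such a check (the helper's body is not shown in the trace):

    wait_backup_agent() {
        local agent_pod=$1
        echo -n "waiting for pbm-agent to be ready in $agent_pod..."
        # Poll the backup-agent sidecar log until PBM reports readiness.
        until kubectl logs "$agent_pod" -c backup-agent --tail=5 2>/dev/null \
                | grep -q 'listening for the commands'; do
            sleep 5
        done
        kubectl logs "$agent_pod" -c backup-agent --tail=1
    }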
+ log 'Sleeping for 360 seconds to avoid PBM-1265'
+ set +o xtrace
[2025-06-06T08:23:55+0000] Sleeping for 360 seconds to avoid PBM-1265
+ sleep 360
+ backup_name_minio=backup-minio
+ run_backup backup-minio 2 physical
+ local name=backup-minio
+ local idx=2
+ local type=physical
+ desc 'run backup backup-minio-2'
+ set +o xtrace
-----------------------------------------------------------------------------------
run backup backup-minio-2
-----------------------------------------------------------------------------------
+ /usr/bin/sed -e 's/name:/name: backup-minio-2/'
+ kubectl_bin apply -f -
+ cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/pitr-physical/conf/backup-minio.yml
++ mktemp
+ local LAST_OUT=/tmp/tmp.DRXRyUGvDO
+ /usr/bin/sed -e 's/type:/type: physical/'
++ mktemp
+ local LAST_ERR=/tmp/tmp.CBTUm9Xn2d
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl apply -f -
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.DRXRyUGvDO
perconaservermongodbbackup.psmdb.percona.com/backup-minio-2 created
+ cat /tmp/tmp.CBTUm9Xn2d
+ rm /tmp/tmp.DRXRyUGvDO /tmp/tmp.CBTUm9Xn2d
+ return 0
+ wait_backup backup-minio-2
+ local backup_name=backup-minio-2
+ local target_state=ready
+ set +o xtrace
waiting for backup-minio-2 to reach ready state......
+ sleep 5
+ write_document -2nd 130
+ local cmp_postfix=-2nd
+ local sleep_value=130
+ log 'write initial data, read from all'
+ set +o xtrace
[2025-06-06T08:30:16+0000] write initial data, read from all
+ run_mongos 'use myApp\n db.test.insert({ x: 100500 })' myApp:myPass@some-name-mongos.pitr-physical-5149
+ local 'command=use myApp\n db.test.insert({ x: 100500 })'
+ local uri=myApp:myPass@some-name-mongos.pitr-physical-5149
+ local driver=mongodb
+ local suffix=.svc.cluster.local
+ local mongo_flag=
+ local port=27017
+ local mongo_bin=mongo
++ echo .svc.cluster.local
++ awk -F: '{print $2}'
+ suffix_port=
+ [[ -z '' ]]
+ suffix=.svc.cluster.local:27017
++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}'
+++ mktemp
++ local LAST_OUT=/tmp/tmp.WLHndTr25a
+++ mktemp
++ local LAST_ERR=/tmp/tmp.ng8duJPPpM
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in '$(seq 0 2)'
++ set +e
++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}'
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 -a -n 1 ']'
++ break
++ cat /tmp/tmp.WLHndTr25a
++ cat /tmp/tmp.ng8duJPPpM
++ rm /tmp/tmp.WLHndTr25a /tmp/tmp.ng8duJPPpM
++ return 0
+ local client_container=psmdb-client-66f577db5f-ltmps
+ kubectl_bin exec psmdb-client-66f577db5f-ltmps -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.pitr-physical-5149.svc.cluster.local:27017/admin '
++ mktemp
+ local LAST_OUT=/tmp/tmp.1Ckhql7q9h
++ mktemp
+ local LAST_ERR=/tmp/tmp.22FEfJuUIc
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl exec psmdb-client-66f577db5f-ltmps -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.pitr-physical-5149.svc.cluster.local:27017/admin '
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.1Ckhql7q9h
UUID("a51253e5-3600-4e1f-b57c-6dc1944da18b") } Percona Server for MongoDB server version: v7.0.18-11 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.22FEfJuUIc + rm /tmp/tmp.1Ckhql7q9h /tmp/tmp.22FEfJuUIc + return 0 + sleep 130 + compare_mongos_cmd find myApp:myPass@some-name-mongos.pitr-physical-5149 -2nd + local command=find + local uri=myApp:myPass@some-name-mongos.pitr-physical-5149 + local postfix=-2nd + local suffix= + local database=myApp + local collection=test + local port=27017 + run_mongos 'use myApp\n db.test.find()' myApp:myPass@some-name-mongos.pitr-physical-5149 mongodb '' '' 27017 + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-mongos.pitr-physical-5149 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local port=27017 + local mongo_bin=mongo + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' + egrep -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' ++ echo .svc.cluster.local ++ awk -F: '{print $2}' + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.6HrbX37VrM +++ mktemp ++ local LAST_ERR=/tmp/tmp.8oATgxQSDT ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.6HrbX37VrM ++ cat /tmp/tmp.8oATgxQSDT ++ rm /tmp/tmp.6HrbX37VrM /tmp/tmp.8oATgxQSDT ++ return 0 + local client_container=psmdb-client-66f577db5f-ltmps + kubectl_bin exec psmdb-client-66f577db5f-ltmps -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.pitr-physical-5149.svc.cluster.local:27017/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.P83C5MzpWC ++ mktemp + local LAST_ERR=/tmp/tmp.jsivin2Get + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-66f577db5f-ltmps -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.pitr-physical-5149.svc.cluster.local:27017/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.P83C5MzpWC + cat /tmp/tmp.jsivin2Get + rm /tmp/tmp.P83C5MzpWC /tmp/tmp.jsivin2Get + return 0 + [[ 0 -eq 0 ]] + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/pitr-physical/compare/find-2nd.json /tmp/tmp.mlcIBzXwjL/find-2nd ++ date +%s + last_write=1749198752 ++ get_latest_oplog_chunk_ts some-name ++ local cluster=some-name +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.i5pGnItFpV ++++ mktemp +++ local LAST_ERR=/tmp/tmp.aEv24P8rsv +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.i5pGnItFpV +++ cat /tmp/tmp.aEv24P8rsv +++ rm /tmp/tmp.i5pGnItFpV /tmp/tmp.aEv24P8rsv +++ return 0 ++ echo 1749198635 + last_chunk=1749198635 + log 'last write: 1749198752' + 
+ log 'last write: 1749198752'
+ set +o xtrace
[2025-06-06T08:32:34+0000] last write: 1749198752
+ log 'last chunk: 1749198635'
+ set +o xtrace
[2025-06-06T08:32:34+0000] last chunk: 1749198635
+ retries=0
+ [[ 1749198635 -gt 1749198752 ]]
+ [[ 0 -gt 30 ]]
++ get_latest_oplog_chunk_ts some-name
++ local cluster=some-name
+++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json
+++ jq '.backups.pitrChunks.pitrChunks | last | .range.end'
++++ mktemp
+++ local LAST_OUT=/tmp/tmp.kqyTVF3ARm
++++ mktemp
+++ local LAST_ERR=/tmp/tmp.cHdmEoAGcA
+++ local exit_status=0
+++ local timeout=4
++++ seq 0 2
+++ for i in '$(seq 0 2)'
+++ set +e
+++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json
+++ exit_status=0
+++ set -e
+++ '[' 0 '!=' 0 -a -n 1 ']'
+++ break
+++ cat /tmp/tmp.kqyTVF3ARm
+++ cat /tmp/tmp.cHdmEoAGcA
+++ rm /tmp/tmp.kqyTVF3ARm /tmp/tmp.cHdmEoAGcA
+++ return 0
++ echo 1749198635
+ last_chunk=1749198635
+ retries=1
++ format_date 1749198635
++ local timestamp=1749198635
+++ TZ=UTC
+++ /usr/bin/date -d@1749198635 '+%Y-%m-%d %H:%M:%S'
++ echo 2025-06-06 08:30:35
++ format_date 1749198752
++ local timestamp=1749198752
+++ TZ=UTC
+++ /usr/bin/date -d@1749198752 '+%Y-%m-%d %H:%M:%S'
++ echo 2025-06-06 08:32:32
+ log 'Waiting for last oplog chunk (2025-06-06 08:30:35) to be greater than last write (2025-06-06 08:32:32)'
+ set +o xtrace
[2025-06-06T08:32:36+0000] Waiting for last oplog chunk (2025-06-06 08:30:35) to be greater than last write (2025-06-06 08:32:32)
+ sleep 10
+ [[ 1749198635 -gt 1749198752 ]]
+ [[ 1 -gt 30 ]]
++ get_latest_oplog_chunk_ts some-name
++ local cluster=some-name
+++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json
+++ jq '.backups.pitrChunks.pitrChunks | last | .range.end'
++++ mktemp
+++ local LAST_OUT=/tmp/tmp.PzthA1iKV9
++++ mktemp
+++ local LAST_ERR=/tmp/tmp.IOXMpdk8R1
+++ local exit_status=0
+++ local timeout=4
++++ seq 0 2
+++ for i in '$(seq 0 2)'
+++ set +e
+++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json
+++ exit_status=0
+++ set -e
+++ '[' 0 '!=' 0 -a -n 1 ']'
+++ break
+++ cat /tmp/tmp.PzthA1iKV9
+++ cat /tmp/tmp.IOXMpdk8R1
+++ rm /tmp/tmp.PzthA1iKV9 /tmp/tmp.IOXMpdk8R1
+++ return 0
++ echo 1749198635
+ last_chunk=1749198635
+ retries=2
++ format_date 1749198635
++ local timestamp=1749198635
+++ TZ=UTC
+++ /usr/bin/date -d@1749198635 '+%Y-%m-%d %H:%M:%S'
++ echo 2025-06-06 08:30:35
++ format_date 1749198752
++ local timestamp=1749198752
+++ TZ=UTC
+++ /usr/bin/date -d@1749198752 '+%Y-%m-%d %H:%M:%S'
++ echo 2025-06-06 08:32:32
+ log 'Waiting for last oplog chunk (2025-06-06 08:30:35) to be greater than last write (2025-06-06 08:32:32)'
+ set +o xtrace
[2025-06-06T08:32:48+0000] Waiting for last oplog chunk (2025-06-06 08:30:35) to be greater than last write (2025-06-06 08:32:32)
+ sleep 10
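The retry pattern above is what makes the later restore-by-date deterministic: the test keeps re-reading PBM's newest oplog-chunk end until it passes the timestamp of the last write, giving up after 30 attempts. Its shape, reconstructed from the trace (it relies on the log, format_date, and get_latest_oplog_chunk_ts helpers seen above):

    last_write=$(date +%s)
    last_chunk=$(get_latest_oplog_chunk_ts some-name)
    retries=0
    # Loop until PITR coverage (last_chunk) extends past the last write.
    while [[ $last_chunk -le $last_write ]]; do
        if [[ $retries -gt 30 ]]; then
            log 'last oplog chunk never passed the last write; giving up'
            exit 1
        fi
        last_chunk=$(get_latest_oplog_chunk_ts some-name)
        retries=$((retries + 1))
        log "Waiting for last oplog chunk ($(format_date $last_chunk)) to be greater than last write ($(format_date $last_write))"
        sleep 10
    done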
last_chunk=1749198643 + retries=3 ++ format_date 1749198643 ++ local timestamp=1749198643 +++ TZ=UTC +++ /usr/bin/date -d@1749198643 '+%Y-%m-%d %H:%M:%S' ++ echo 2025-06-06 08:30:43 ++ format_date 1749198752 ++ local timestamp=1749198752 +++ TZ=UTC +++ /usr/bin/date -d@1749198752 '+%Y-%m-%d %H:%M:%S' ++ echo 2025-06-06 08:32:32 + log 'Waiting for last oplog chunk (2025-06-06 08:30:43) to be greater than last write (2025-06-06 08:32:32)' + set +o xtrace [2025-06-06T08:33:00+0000] Waiting for last oplog chunk (2025-06-06 08:30:43) to be greater than last write (2025-06-06 08:32:32) + sleep 10 + [[ 1749198643 -gt 1749198752 ]] + [[ 3 -gt 30 ]] ++ get_latest_oplog_chunk_ts some-name ++ local cluster=some-name +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.Fh9befQgjv ++++ mktemp +++ local LAST_ERR=/tmp/tmp.zCqeSjejSX +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.Fh9befQgjv +++ cat /tmp/tmp.zCqeSjejSX +++ rm /tmp/tmp.Fh9befQgjv /tmp/tmp.zCqeSjejSX +++ return 0 ++ echo 1749198759 + last_chunk=1749198759 + retries=4 ++ format_date 1749198759 ++ local timestamp=1749198759 +++ TZ=UTC +++ /usr/bin/date -d@1749198759 '+%Y-%m-%d %H:%M:%S' ++ echo 2025-06-06 08:32:39 ++ format_date 1749198752 ++ local timestamp=1749198752 +++ TZ=UTC +++ /usr/bin/date -d@1749198752 '+%Y-%m-%d %H:%M:%S' ++ echo 2025-06-06 08:32:32 + log 'Waiting for last oplog chunk (2025-06-06 08:32:39) to be greater than last write (2025-06-06 08:32:32)' + set +o xtrace [2025-06-06T08:33:13+0000] Waiting for last oplog chunk (2025-06-06 08:32:39) to be greater than last write (2025-06-06 08:32:32) + sleep 10 + [[ 1749198759 -gt 1749198752 ]] + sleep 10 + check_recovery backup-minio-2 date 1749198759 -2nd some-name + local backup_name=backup-minio-2 + local restore_type=date + local restore_date=1749198759 + local cmp_postfix=-2nd + local cluster_name=some-name + local backupSource= ++ get_latest_oplog_chunk_ts some-name ++ local cluster=some-name +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.zfWA2TdWKJ ++++ mktemp +++ local LAST_ERR=/tmp/tmp.pl6Fg53AFg +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.zfWA2TdWKJ +++ cat /tmp/tmp.pl6Fg53AFg +++ rm /tmp/tmp.zfWA2TdWKJ /tmp/tmp.pl6Fg53AFg +++ return 0 ++ echo 1749198759 + local latest_ts=1749198759 + desc 'write more data before restore by date' + set +o xtrace ----------------------------------------------------------------------------------- write more data before restore by date ----------------------------------------------------------------------------------- + run_mongos 'use myApp\n db.test.insert({ x: 100501 })' myApp:myPass@some-name-mongos.pitr-physical-5149 + local 'command=use myApp\n db.test.insert({ x: 100501 })' + local uri=myApp:myPass@some-name-mongos.pitr-physical-5149 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local port=27017 + local mongo_bin=mongo ++ echo .svc.cluster.local ++ awk -F: 
'{print $2}' + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.xkB9jDQkd7 +++ mktemp ++ local LAST_ERR=/tmp/tmp.MLkZQ6cYs2 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.xkB9jDQkd7 ++ cat /tmp/tmp.MLkZQ6cYs2 ++ rm /tmp/tmp.xkB9jDQkd7 /tmp/tmp.MLkZQ6cYs2 ++ return 0 + local client_container=psmdb-client-66f577db5f-ltmps + kubectl_bin exec psmdb-client-66f577db5f-ltmps -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100501 })\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.pitr-physical-5149.svc.cluster.local:27017/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.XWIfMCvzcx ++ mktemp + local LAST_ERR=/tmp/tmp.CpPt4uL5uD + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-66f577db5f-ltmps -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100501 })\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.pitr-physical-5149.svc.cluster.local:27017/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.XWIfMCvzcx Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-mongos.pitr-physical-5149.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb Implicit session: session { "id" : UUID("fb069e8b-a258-402e-8d0e-6796cbe7b5a6") } Percona Server for MongoDB server version: v7.0.18-11 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.CpPt4uL5uD + rm /tmp/tmp.XWIfMCvzcx /tmp/tmp.CpPt4uL5uD + return 0 + [[ -n 1749198759 ]] ++ format_date 1749198759 ++ local timestamp=1749198759 +++ TZ=UTC +++ /usr/bin/date -d@1749198759 '+%Y-%m-%d %H:%M:%S' ++ echo 2025-06-06 08:32:39 + desc 'Restoring to time 2025-06-06 08:32:39' + set +o xtrace ----------------------------------------------------------------------------------- Restoring to time 2025-06-06 08:32:39 ----------------------------------------------------------------------------------- + retries=0 + [[ 1749198759 -gt 1749198759 ]] + [[ 0 -gt 30 ]] ++ get_latest_oplog_chunk_ts some-name ++ local cluster=some-name +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json ++++ mktemp +++ local LAST_OUT=/tmp/tmp.JVybCIdUT5 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.WoUWmioxAq +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.JVybCIdUT5 +++ cat /tmp/tmp.WoUWmioxAq +++ rm /tmp/tmp.JVybCIdUT5 /tmp/tmp.WoUWmioxAq +++ return 0 ++ echo 1749198759 + latest_ts=1749198759 + retries=1 ++ format_date 1749198759 ++ local timestamp=1749198759 +++ TZ=UTC +++ /usr/bin/date -d@1749198759 '+%Y-%m-%d %H:%M:%S' ++ echo 2025-06-06 08:32:39 ++ format_date 1749198759 ++ local timestamp=1749198759 +++ TZ=UTC +++ /usr/bin/date -d@1749198759 '+%Y-%m-%d %H:%M:%S' ++ echo 2025-06-06 08:32:39 + echo 'Waiting for last oplog chunk (2025-06-06 08:32:39) to be greater than restore target (2025-06-06 08:32:39)' Waiting for last oplog chunk (2025-06-06 
08:32:39) to be greater than restore target (2025-06-06 08:32:39) + sleep 10 + [[ 1749198759 -gt 1749198759 ]] + [[ 1 -gt 30 ]] ++ get_latest_oplog_chunk_ts some-name ++ local cluster=some-name +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.2dBSKKFEVu ++++ mktemp +++ local LAST_ERR=/tmp/tmp.sjkLGfCAEo +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.2dBSKKFEVu +++ cat /tmp/tmp.sjkLGfCAEo +++ rm /tmp/tmp.2dBSKKFEVu /tmp/tmp.sjkLGfCAEo +++ return 0 ++ echo 1749198759 + latest_ts=1749198759 + retries=2 ++ format_date 1749198759 ++ local timestamp=1749198759 +++ TZ=UTC +++ /usr/bin/date -d@1749198759 '+%Y-%m-%d %H:%M:%S' ++ echo 2025-06-06 08:32:39 ++ format_date 1749198759 ++ local timestamp=1749198759 +++ TZ=UTC +++ /usr/bin/date -d@1749198759 '+%Y-%m-%d %H:%M:%S' ++ echo 2025-06-06 08:32:39 + echo 'Waiting for last oplog chunk (2025-06-06 08:32:39) to be greater than restore target (2025-06-06 08:32:39)' Waiting for last oplog chunk (2025-06-06 08:32:39) to be greater than restore target (2025-06-06 08:32:39) + sleep 10 + [[ 1749198759 -gt 1749198759 ]] + [[ 2 -gt 30 ]] ++ get_latest_oplog_chunk_ts some-name ++ local cluster=some-name +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.ozjkuWywSz ++++ mktemp +++ local LAST_ERR=/tmp/tmp.lvvFmeKOep +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.ozjkuWywSz +++ cat /tmp/tmp.lvvFmeKOep +++ rm /tmp/tmp.ozjkuWywSz /tmp/tmp.lvvFmeKOep +++ return 0 ++ echo 1749198759 + latest_ts=1749198759 + retries=3 ++ format_date 1749198759 ++ local timestamp=1749198759 +++ TZ=UTC +++ /usr/bin/date -d@1749198759 '+%Y-%m-%d %H:%M:%S' ++ echo 2025-06-06 08:32:39 ++ format_date 1749198759 ++ local timestamp=1749198759 +++ TZ=UTC +++ /usr/bin/date -d@1749198759 '+%Y-%m-%d %H:%M:%S' ++ echo 2025-06-06 08:32:39 + echo 'Waiting for last oplog chunk (2025-06-06 08:32:39) to be greater than restore target (2025-06-06 08:32:39)' Waiting for last oplog chunk (2025-06-06 08:32:39) to be greater than restore target (2025-06-06 08:32:39) + sleep 10 + [[ 1749198759 -gt 1749198759 ]] + [[ 3 -gt 30 ]] ++ get_latest_oplog_chunk_ts some-name ++ local cluster=some-name +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.4ThffUpHZV ++++ mktemp +++ local LAST_ERR=/tmp/tmp.qDioJ6dnRP +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.4ThffUpHZV +++ cat /tmp/tmp.qDioJ6dnRP +++ rm /tmp/tmp.4ThffUpHZV /tmp/tmp.qDioJ6dnRP +++ return 0 ++ echo 1749198759 + latest_ts=1749198759 + retries=4 ++ format_date 1749198759 ++ local timestamp=1749198759 +++ TZ=UTC +++ /usr/bin/date -d@1749198759 '+%Y-%m-%d %H:%M:%S' ++ echo 2025-06-06 
08:32:39 ++ format_date 1749198759 ++ local timestamp=1749198759 +++ TZ=UTC +++ /usr/bin/date -d@1749198759 '+%Y-%m-%d %H:%M:%S' ++ echo 2025-06-06 08:32:39 + echo 'Waiting for last oplog chunk (2025-06-06 08:32:39) to be greater than restore target (2025-06-06 08:32:39)' Waiting for last oplog chunk (2025-06-06 08:32:39) to be greater than restore target (2025-06-06 08:32:39) + sleep 10 + [[ 1749198759 -gt 1749198759 ]] + [[ 4 -gt 30 ]] ++ get_latest_oplog_chunk_ts some-name ++ local cluster=some-name +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.cPy5DWiBRo ++++ mktemp +++ local LAST_ERR=/tmp/tmp.isA9dM23bc +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.cPy5DWiBRo +++ cat /tmp/tmp.isA9dM23bc +++ rm /tmp/tmp.cPy5DWiBRo /tmp/tmp.isA9dM23bc +++ return 0 ++ echo 1749198759 + latest_ts=1749198759 + retries=5 ++ format_date 1749198759 ++ local timestamp=1749198759 +++ TZ=UTC +++ /usr/bin/date -d@1749198759 '+%Y-%m-%d %H:%M:%S' ++ echo 2025-06-06 08:32:39 ++ format_date 1749198759 ++ local timestamp=1749198759 +++ TZ=UTC +++ /usr/bin/date -d@1749198759 '+%Y-%m-%d %H:%M:%S' ++ echo 2025-06-06 08:32:39 + echo 'Waiting for last oplog chunk (2025-06-06 08:32:39) to be greater than restore target (2025-06-06 08:32:39)' Waiting for last oplog chunk (2025-06-06 08:32:39) to be greater than restore target (2025-06-06 08:32:39) + sleep 10 + [[ 1749198759 -gt 1749198759 ]] + [[ 5 -gt 30 ]] ++ get_latest_oplog_chunk_ts some-name ++ local cluster=some-name +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.YnLNrqm6Mf ++++ mktemp +++ local LAST_ERR=/tmp/tmp.p2T0yTj8W0 +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.YnLNrqm6Mf +++ cat /tmp/tmp.p2T0yTj8W0 +++ rm /tmp/tmp.YnLNrqm6Mf /tmp/tmp.p2T0yTj8W0 +++ return 0 ++ echo 1749198759 + latest_ts=1749198759 + retries=6 ++ format_date 1749198759 ++ local timestamp=1749198759 +++ TZ=UTC +++ /usr/bin/date -d@1749198759 '+%Y-%m-%d %H:%M:%S' ++ echo 2025-06-06 08:32:39 ++ format_date 1749198759 ++ local timestamp=1749198759 +++ TZ=UTC +++ /usr/bin/date -d@1749198759 '+%Y-%m-%d %H:%M:%S' ++ echo 2025-06-06 08:32:39 + echo 'Waiting for last oplog chunk (2025-06-06 08:32:39) to be greater than restore target (2025-06-06 08:32:39)' Waiting for last oplog chunk (2025-06-06 08:32:39) to be greater than restore target (2025-06-06 08:32:39) + sleep 10 + [[ 1749198759 -gt 1749198759 ]] + [[ 6 -gt 30 ]] ++ get_latest_oplog_chunk_ts some-name ++ local cluster=some-name +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.Dswv55KxiJ ++++ mktemp +++ local LAST_ERR=/tmp/tmp.b5tZDN4BHD +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break 
+++ cat /tmp/tmp.Dswv55KxiJ +++ cat /tmp/tmp.b5tZDN4BHD +++ rm /tmp/tmp.Dswv55KxiJ /tmp/tmp.b5tZDN4BHD +++ return 0 ++ echo 1749198759 + latest_ts=1749198759 + retries=7 ++ format_date 1749198759 ++ local timestamp=1749198759 +++ TZ=UTC +++ /usr/bin/date -d@1749198759 '+%Y-%m-%d %H:%M:%S' ++ echo 2025-06-06 08:32:39 ++ format_date 1749198759 ++ local timestamp=1749198759 +++ TZ=UTC +++ /usr/bin/date -d@1749198759 '+%Y-%m-%d %H:%M:%S' ++ echo 2025-06-06 08:32:39 + echo 'Waiting for last oplog chunk (2025-06-06 08:32:39) to be greater than restore target (2025-06-06 08:32:39)' Waiting for last oplog chunk (2025-06-06 08:32:39) to be greater than restore target (2025-06-06 08:32:39) + sleep 10 + [[ 1749198759 -gt 1749198759 ]] + [[ 7 -gt 30 ]] ++ get_latest_oplog_chunk_ts some-name ++ local cluster=some-name +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.Fw8llm4Aq2 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.qKVS382Zn1 +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.Fw8llm4Aq2 +++ cat /tmp/tmp.qKVS382Zn1 +++ rm /tmp/tmp.Fw8llm4Aq2 /tmp/tmp.qKVS382Zn1 +++ return 0 ++ echo 1749198773 + latest_ts=1749198773 + retries=8 ++ format_date 1749198773 ++ local timestamp=1749198773 +++ TZ=UTC +++ /usr/bin/date -d@1749198773 '+%Y-%m-%d %H:%M:%S' ++ echo 2025-06-06 08:32:53 ++ format_date 1749198759 ++ local timestamp=1749198759 +++ TZ=UTC +++ /usr/bin/date -d@1749198759 '+%Y-%m-%d %H:%M:%S' ++ echo 2025-06-06 08:32:39 + echo 'Waiting for last oplog chunk (2025-06-06 08:32:53) to be greater than restore target (2025-06-06 08:32:39)' Waiting for last oplog chunk (2025-06-06 08:32:53) to be greater than restore target (2025-06-06 08:32:39) + sleep 10 + [[ 1749198773 -gt 1749198759 ]] + '[' -z '' ']' + desc 'check restore by date' + set +o xtrace ----------------------------------------------------------------------------------- check restore by date ----------------------------------------------------------------------------------- + /usr/bin/sed -e 's/name:/name: restore-backup-minio-2/' + /usr/bin/sed -e 's/backupName:/backupName: backup-minio-2/' + kubectl_bin apply -f - + /usr/bin/sed -e /backupSource/,+8d + '[' -z 1749198759 ']' + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/pitr-physical/conf/restore.yml + /usr/bin/sed -e 's/pitrType:/type: date/' ++ format_date 1749198759 ++ mktemp ++ local timestamp=1749198759 +++ TZ=UTC +++ /usr/bin/date -d@1749198759 '+%Y-%m-%d %H:%M:%S' + local LAST_OUT=/tmp/tmp.IAHrm7OlxJ ++ mktemp ++ echo 2025-06-06 08:32:39 + local LAST_ERR=/tmp/tmp.WN6pIEknIR + local exit_status=0 + local timeout=4 + /usr/bin/sed -e 's/date:/date: 2025-06-06 08:32:39/' ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.IAHrm7OlxJ perconaservermongodbrestore.psmdb.percona.com/restore-backup-minio-2 created + cat /tmp/tmp.WN6pIEknIR + rm /tmp/tmp.IAHrm7OlxJ /tmp/tmp.WN6pIEknIR + return 0 + wait_restore backup-minio-2 some-name requested 0 1200 + local backup_name=backup-minio-2 + local cluster_name=some-name + local target_state=requested + local wait_cluster_consistency=0 + local wait_time=1200 + local ok_if_ready=0 + set +o xtrace Waiting for the 
psmdb-restore/restore-backup-minio-2 object to be createdOK Waiting psmdb-restore/restore-backup-minio-2 to reach state "requested" ...........OK after 11 minutes + [[ 0 -eq 1 ]] + echo + wait_restore backup-minio-2 some-name ready 0 1600 + local backup_name=backup-minio-2 + local cluster_name=some-name + local target_state=ready + local wait_cluster_consistency=0 + local wait_time=1600 + local ok_if_ready=0 + set +o xtrace Waiting for the psmdb-restore/restore-backup-minio-2 object to be createdOK Waiting psmdb-restore/restore-backup-minio-2 to reach state "ready" ... ----------------------------------------------------------------------------------- pbm-agent logs from some-name-rs0-0 ----------------------------------------------------------------------------------- + kubectl_bin exec some-name-rs0-0 -- cat /tmp/pbm-agent.log ++ mktemp + local LAST_OUT=/tmp/tmp.5w4rukfdDA ++ mktemp + local LAST_ERR=/tmp/tmp.yWANqCL6Bx + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec some-name-rs0-0 -- cat /tmp/pbm-agent.log + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.5w4rukfdDA 2025/06/06 08:41:09 [entrypoint] starting `pbm-agent` 2025-06-06T08:41:10.000+0000 I [pbm-agent ASCII-art startup banner and Percona Squad promo elided]
2025-06-06T08:41:10.000+0000 I log options: log-path=/dev/stderr, log-level:D, log-json:false 2025-06-06T08:41:10.000+0000 I pbm-agent: Version: 2.9.1 Platform: linux/amd64 GitCommit: 1f8d60145ed5e7b834c6d7c5318446071bc04ecb GitBranch: release-2.9.1 BuildTime: 2025-03-25_11:35_UTC GoVersion: go1.22.8 2025-06-06T08:41:10.000+0000 I starting PITR routine 2025-06-06T08:41:10.000+0000 I node: rs0/some-name-rs0-0.some-name-rs0.pitr-physical-5149.svc.cluster.local:27017 2025-06-06T08:41:10.000+0000 I conn level ReadConcern: majority; WriteConcern: majority 2025-06-06T08:41:10.000+0000 I listening for the commands 2025-06-06T08:46:50.000+0000 I got command restore [name: 2025-06-06T08:46:49.801096496Z, snapshot: 2025-06-06T08:29:58Z point-in-time: <1749198759,0>] , opid: 6842aaf9fb2c99b9a202e303 2025-06-06T08:46:50.000+0000 I got epoch {1749199610 13} 2025-06-06T08:46:50.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] to time: 2025-06-06T08:32:39Z 2025-06-06T08:46:50.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] oplog slicer disabled 2025-06-06T08:46:50.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] backup: 2025-06-06T08:29:58Z 2025-06-06T08:46:50.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] recovery started 2025-06-06T08:46:50.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] port: 27992 2025-06-06T08:46:50.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] uploading ".pbm.restore/2025-06-06T08:46:49.801096496Z/rs.rs0/node.some-name-rs0-0.some-name-rs0.pitr-physical-5149.svc.cluster.local:27017.hb" [size hint: 10 (10.00B); part size: 10485760 (10.00MB)] 2025-06-06T08:46:50.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] uploading ".pbm.restore/2025-06-06T08:46:49.801096496Z/rs.rs0/rs.hb" [size hint: 10 (10.00B); part size: 10485760 (10.00MB)] 2025-06-06T08:46:50.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] uploading ".pbm.restore/2025-06-06T08:46:49.801096496Z/cluster.hb" [size hint: 10 (10.00B); part size: 10485760 (10.00MB)] 2025-06-06T08:46:50.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] mongod binary: mongod, version: v7.0.18-11 2025-06-06T08:46:50.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] moving to state starting 2025-06-06T08:46:50.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] uploading ".pbm.restore/2025-06-06T08:46:49.801096496Z/rs.rs0/node.some-name-rs0-0.some-name-rs0.pitr-physical-5149.svc.cluster.local:27017.starting" [size hint: 10 (10.00B); part size: 10485760 (10.00MB)] 2025-06-06T08:46:50.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] waiting for `starting` status in rs map[.pbm.restore/2025-06-06T08:46:49.801096496Z/rs.rs0/node.some-name-rs0-0.some-name-rs0.pitr-physical-5149.svc.cluster.local:27017:{} .pbm.restore/2025-06-06T08:46:49.801096496Z/rs.rs0/node.some-name-rs0-1.some-name-rs0.pitr-physical-5149.svc.cluster.local:27017:{} .pbm.restore/2025-06-06T08:46:49.801096496Z/rs.rs0/node.some-name-rs0-2.some-name-rs0.pitr-physical-5149.svc.cluster.local:27017:{}] 2025-06-06T08:46:55.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] uploading ".pbm.restore/2025-06-06T08:46:49.801096496Z/rs.rs0/rs.starting" [size hint: 10 (10.00B); part size: 10485760 (10.00MB)] 2025-06-06T08:46:55.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] waiting for cluster 2025-06-06T08:47:05.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] converged to state starting 2025-06-06T08:47:05.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] starting 2025-06-06T08:47:05.000+0000 I
[restore/2025-06-06T08:46:49.801096496Z] moving to state running 2025-06-06T08:47:05.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] uploading ".pbm.restore/2025-06-06T08:46:49.801096496Z/rs.rs0/node.some-name-rs0-0.some-name-rs0.pitr-physical-5149.svc.cluster.local:27017.running" [size hint: 10 (10.00B); part size: 10485760 (10.00MB)] 2025-06-06T08:47:05.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] waiting for `running` status in rs map[.pbm.restore/2025-06-06T08:46:49.801096496Z/rs.rs0/node.some-name-rs0-0.some-name-rs0.pitr-physical-5149.svc.cluster.local:27017:{} .pbm.restore/2025-06-06T08:46:49.801096496Z/rs.rs0/node.some-name-rs0-1.some-name-rs0.pitr-physical-5149.svc.cluster.local:27017:{} .pbm.restore/2025-06-06T08:46:49.801096496Z/rs.rs0/node.some-name-rs0-2.some-name-rs0.pitr-physical-5149.svc.cluster.local:27017:{}] 2025-06-06T08:47:10.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] uploading ".pbm.restore/2025-06-06T08:46:49.801096496Z/rs.rs0/rs.running" [size hint: 10 (10.00B); part size: 10485760 (10.00MB)] 2025-06-06T08:47:10.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] waiting for cluster 2025-06-06T08:47:20.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] converged to state running 2025-06-06T08:47:20.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] send to stopAgent chan 2025-06-06T08:47:21.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] stop agents heartbeats 2025-06-06T08:47:21.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] stopping mongod and flushing old data 2025-06-06T08:47:21.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] shutdown server 2025-06-06T08:47:21.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] waiting to became secondary 2025-06-06T08:47:22.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] waiting to became secondary 2025-06-06T08:47:23.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] waiting to became secondary 2025-06-06T08:47:24.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] waiting to became secondary 2025-06-06T08:47:25.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] waiting to became secondary 2025-06-06T08:47:26.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] waiting to became secondary 2025-06-06T08:47:27.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] waiting to became secondary 2025-06-06T08:47:28.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] waiting to became secondary 2025-06-06T08:47:29.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] waiting to became secondary 2025-06-06T08:47:30.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] waiting to became secondary 2025-06-06T08:47:31.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] waiting to became secondary 2025-06-06T08:47:32.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] waiting to became secondary 2025-06-06T08:47:33.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] waiting to became secondary 2025-06-06T08:47:34.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] waiting to became secondary 2025-06-06T08:47:35.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] waiting to became secondary 2025-06-06T08:47:36.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] waiting to became secondary 2025-06-06T08:47:37.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] waiting to became secondary 2025-06-06T08:47:38.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] waiting to became secondary 2025-06-06T08:47:39.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] waiting to became secondary 
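-----------------------------------------------------------------------------------
[editor's note] the restore object driving this pbm-agent activity
-----------------------------------------------------------------------------------
The per-node phases traced above (starting -> running -> stop mongod -> wait to
become secondary) are driven by the PerconaServerMongoDBRestore object the
harness applied in the "check restore by date" step: check_recovery pipes
e2e-tests/pitr-physical/conf/restore.yml through sed to fill in the restore
name, backupName, and PITR target. A minimal sketch of the equivalent apply,
reconstructed from those sed substitutions (the exact template fields live in
restore.yml; the apiVersion and clusterName values are assumed from the
operator's stock examples, not shown in this log):

    # hypothetical reconstruction of what the sed pipeline feeds to kubectl
    kubectl apply -f - <<EOF
    apiVersion: psmdb.percona.com/v1
    kind: PerconaServerMongoDBRestore
    metadata:
      name: restore-backup-minio-2
    spec:
      clusterName: some-name          # assumed; the cluster under test
      backupName: backup-minio-2      # base physical backup to restore from
      pitr:
        type: date                    # 's/pitrType:/type: date/'
        date: "2025-06-06 08:32:39"   # 's/date:/date: 2025-06-06 08:32:39/'
    EOF
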
2025-06-06T08:47:40.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] waiting to became secondary 2025-06-06T08:47:41.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] waiting to became secondary 2025-06-06T08:47:42.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] waiting to became secondary 2025-06-06T08:47:43.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] waiting to became secondary 2025-06-06T08:47:44.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] waiting to became secondary 2025-06-06T08:47:45.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] waiting to became secondary 2025-06-06T08:48:02.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] waiting for the node to shutdown 2025-06-06T08:48:03.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] uploading ".pbm.restore/2025-06-06T08:46:49.801096496Z/rs.rs0/rs.down" [size hint: 10 (10.00B); part size: 10485760 (10.00MB)] 2025-06-06T08:48:03.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove old data 2025-06-06T08:48:03.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/mongod.lock 2025-06-06T08:48:03.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/index-1-4190350937520471217.wt 2025-06-06T08:48:03.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/collection-53-4190350937520471217.wt 2025-06-06T08:48:03.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/collection-47-4190350937520471217.wt 2025-06-06T08:48:03.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/index-7-4190350937520471217.wt 2025-06-06T08:48:03.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/diagnostic.data 2025-06-06T08:48:03.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/index-11-4190350937520471217.wt 2025-06-06T08:48:03.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/journal 2025-06-06T08:48:03.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/index-28-4190350937520471217.wt 2025-06-06T08:48:03.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/index-3-4190350937520471217.wt 2025-06-06T08:48:03.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/index-66-4190350937520471217.wt 2025-06-06T08:48:03.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/index-18-4190350937520471217.wt 2025-06-06T08:48:03.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/mongod-data 2025-06-06T08:48:03.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/collection-64-4190350937520471217.wt 2025-06-06T08:48:03.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/collection-26-4190350937520471217.wt 2025-06-06T08:48:03.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/collection-12-4190350937520471217.wt 2025-06-06T08:48:03.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/index-5-4190350937520471217.wt 2025-06-06T08:48:03.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/collection-56-4190350937520471217.wt 2025-06-06T08:48:03.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/index-69-4190350937520471217.wt 2025-06-06T08:48:03.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/storage.bson 2025-06-06T08:48:03.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/index-57-4190350937520471217.wt 2025-06-06T08:48:03.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove 
/data/db/index-43-4190350937520471217.wt 2025-06-06T08:48:03.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/index-63-4190350937520471217.wt 2025-06-06T08:48:03.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/collection-21-4190350937520471217.wt 2025-06-06T08:48:03.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/index-15-4190350937520471217.wt 2025-06-06T08:48:03.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/index-40-4190350937520471217.wt 2025-06-06T08:48:03.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/psmdb_telemetry.data 2025-06-06T08:48:03.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/collection-4-4190350937520471217.wt 2025-06-06T08:48:03.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/index-52-4190350937520471217.wt 2025-06-06T08:48:03.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/collection-0-4190350937520471217.wt 2025-06-06T08:48:03.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/collection-41-4190350937520471217.wt 2025-06-06T08:48:03.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/index-54-4190350937520471217.wt 2025-06-06T08:48:03.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/collection-32-4190350937520471217.wt 2025-06-06T08:48:03.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/index-25-4190350937520471217.wt 2025-06-06T08:48:03.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/index-48-4190350937520471217.wt 2025-06-06T08:48:03.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/collection-60-4190350937520471217.wt 2025-06-06T08:48:03.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/collection-2-4190350937520471217.wt 2025-06-06T08:48:03.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/collection-33-4190350937520471217.wt 2025-06-06T08:48:03.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/_mdb_catalog.wt 2025-06-06T08:48:03.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/collection-14-4190350937520471217.wt 2025-06-06T08:48:03.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/index-49-4190350937520471217.wt 2025-06-06T08:48:03.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/index-13-4190350937520471217.wt 2025-06-06T08:48:03.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/index-61-4190350937520471217.wt 2025-06-06T08:48:03.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/WiredTiger 2025-06-06T08:48:03.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/WiredTiger.lock 2025-06-06T08:48:03.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/index-70-4190350937520471217.wt 2025-06-06T08:48:03.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/index-46-4190350937520471217.wt 2025-06-06T08:48:03.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/index-23-4190350937520471217.wt 2025-06-06T08:48:03.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/collection-16-4190350937520471217.wt 2025-06-06T08:48:03.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/index-55-4190350937520471217.wt 2025-06-06T08:48:03.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/index-20-4190350937520471217.wt 2025-06-06T08:48:03.000+0000 D 
[restore/2025-06-06T08:46:49.801096496Z] remove /data/db/index-22-4190350937520471217.wt 2025-06-06T08:48:03.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/collection-65-4190350937520471217.wt 2025-06-06T08:48:03.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/index-38-4190350937520471217.wt 2025-06-06T08:48:03.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/collection-31-4190350937520471217.wt 2025-06-06T08:48:03.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/collection-58-4190350937520471217.wt 2025-06-06T08:48:03.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/index-45-4190350937520471217.wt 2025-06-06T08:48:03.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/sizeStorer.wt 2025-06-06T08:48:03.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/collection-42-4190350937520471217.wt 2025-06-06T08:48:03.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/key.db 2025-06-06T08:48:03.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/index-9-4190350937520471217.wt 2025-06-06T08:48:03.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/WiredTiger.turtle 2025-06-06T08:48:03.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/index-35-4190350937520471217.wt 2025-06-06T08:48:03.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/collection-50-4190350937520471217.wt 2025-06-06T08:48:03.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/collection-8-4190350937520471217.wt 2025-06-06T08:48:03.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/index-27-4190350937520471217.wt 2025-06-06T08:48:03.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/collection-10-4190350937520471217.wt 2025-06-06T08:48:03.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/index-59-4190350937520471217.wt 2025-06-06T08:48:03.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/index-30-4190350937520471217.wt 2025-06-06T08:48:03.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/lost+found 2025-06-06T08:48:03.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/collection-6-4190350937520471217.wt 2025-06-06T08:48:03.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/collection-17-4190350937520471217.wt 2025-06-06T08:48:03.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/collection-19-4190350937520471217.wt 2025-06-06T08:48:03.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/collection-62-4190350937520471217.wt 2025-06-06T08:48:03.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/index-51-4190350937520471217.wt 2025-06-06T08:48:03.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/index-36-4190350937520471217.wt 2025-06-06T08:48:03.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/collection-68-4190350937520471217.wt 2025-06-06T08:48:03.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/collection-29-4190350937520471217.wt 2025-06-06T08:48:03.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/WiredTigerHS.wt 2025-06-06T08:48:03.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/collection-24-4190350937520471217.wt 2025-06-06T08:48:03.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/index-39-4190350937520471217.wt 
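-----------------------------------------------------------------------------------
[editor's note] what the "remove /data/db/..." run means
-----------------------------------------------------------------------------------
This is the destructive half of a physical restore: pbm shuts mongod down,
wipes the dbPath file by file, and only then copies the snapshot files back in
(the "copying backup data" entries that follow). While the agents do this, the
harness's wait_restore keeps polling the restore object's state. A condensed
sketch of that polling idea, assuming the psmdb-restore short name and the
.status.state field seen in this log (the interval and messages here are
illustrative, not the harness's exact code):

    wait_restore_state() {
        local name=$1 target=$2 timeout=${3:-1200} waited=0
        # poll the PerconaServerMongoDBRestore status until it reaches $target
        until [[ "$(kubectl get psmdb-restore "restore-${name}" \
            -o jsonpath='{.status.state}' 2>/dev/null)" == "${target}" ]]; do
            sleep 10
            waited=$((waited + 10))
            if [[ ${waited} -ge ${timeout} ]]; then
                echo "timeout waiting for restore-${name} to reach ${target}" >&2
                return 1
            fi
        done
    }

    # mirrors the harness calls seen above:
    # wait_restore_state backup-minio-2 requested 1200
    # wait_restore_state backup-minio-2 ready 1600
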
2025-06-06T08:48:03.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/WiredTiger.wt 2025-06-06T08:48:03.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/index-71-4190350937520471217.wt 2025-06-06T08:48:03.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/index-37-4190350937520471217.wt 2025-06-06T08:48:03.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/collection-34-4190350937520471217.wt 2025-06-06T08:48:03.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/index-44-4190350937520471217.wt 2025-06-06T08:48:03.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/index-67-4190350937520471217.wt 2025-06-06T08:48:03.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copying backup data 2025-06-06T08:48:03.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] download max buf 1073741824 (arena 268435456, span 33554432, concurrency 4) 2025-06-06T08:48:03.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs0/key.db/WiredTigerLog.0000000001.gz> to 2025-06-06T08:48:03.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs0/key.db/WiredTigerHS.wt.gz> to 2025-06-06T08:48:03.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs0/key.db/key.wt.gz> to 2025-06-06T08:48:03.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs0/key.db/parameters.wt.gz> to 2025-06-06T08:48:03.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs0/key.db/WiredTiger.backup.gz> to 2025-06-06T08:48:03.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs0/key.db/WiredTiger.gz> to 2025-06-06T08:48:03.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs0/journal/WiredTigerLog.0000000001.gz> to 2025-06-06T08:48:03.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs0/WiredTigerHS.wt.gz> to 2025-06-06T08:48:03.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs0/_mdb_catalog.wt.gz> to 2025-06-06T08:48:03.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs0/collection-0--141914621497047119.wt.gz> to 2025-06-06T08:48:03.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs0/collection-10--141914621497047119.wt.gz> to 2025-06-06T08:48:03.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs0/collection-12--141914621497047119.wt.gz> to 2025-06-06T08:48:03.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs0/collection-14--141914621497047119.wt.gz> to 2025-06-06T08:48:03.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs0/collection-18--141914621497047119.wt.gz> to 2025-06-06T08:48:03.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs0/collection-19--141914621497047119.wt.gz> to 2025-06-06T08:48:03.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs0/collection-2--141914621497047119.wt.gz> to 2025-06-06T08:48:03.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs0/collection-21--141914621497047119.wt.gz> to 2025-06-06T08:48:03.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs0/collection-24--141914621497047119.wt.gz> to 2025-06-06T08:48:03.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy 
<2025-06-06T08:29:58Z/rs0/collection-27--141914621497047119.wt.gz> to 2025-06-06T08:48:03.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs0/collection-28--141914621497047119.wt.gz> to 2025-06-06T08:48:03.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs0/collection-31--141914621497047119.wt.gz> to 2025-06-06T08:48:03.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs0/collection-34--141914621497047119.wt.gz> to 2025-06-06T08:48:03.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs0/collection-37--141914621497047119.wt.gz> to 2025-06-06T08:48:03.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs0/collection-4--141914621497047119.wt.gz> to 2025-06-06T08:48:03.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs0/collection-40--141914621497047119.wt.gz> to 2025-06-06T08:48:03.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs0/collection-43--141914621497047119.wt.gz> to 2025-06-06T08:48:03.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs0/collection-46--141914621497047119.wt.gz> to 2025-06-06T08:48:03.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs0/collection-49--141914621497047119.wt.gz> to 2025-06-06T08:48:03.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs0/collection-52--141914621497047119.wt.gz> to 2025-06-06T08:48:03.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs0/collection-54--141914621497047119.wt.gz> to 2025-06-06T08:48:03.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs0/collection-56--141914621497047119.wt.gz> to 2025-06-06T08:48:03.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs0/collection-58--141914621497047119.wt.gz> to 2025-06-06T08:48:03.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs0/collection-6--141914621497047119.wt.gz> to 2025-06-06T08:48:03.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs0/collection-60--141914621497047119.wt.gz> to 2025-06-06T08:48:03.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs0/collection-62--141914621497047119.wt.gz> to 2025-06-06T08:48:03.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs0/collection-64--141914621497047119.wt.gz> to 2025-06-06T08:48:03.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs0/collection-66--141914621497047119.wt.gz> to 2025-06-06T08:48:03.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs0/collection-68--141914621497047119.wt.gz> to 2025-06-06T08:48:03.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs0/collection-70--141914621497047119.wt.gz> to 2025-06-06T08:48:03.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs0/collection-8--141914621497047119.wt.gz> to 2025-06-06T08:48:03.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs0/index-1--141914621497047119.wt.gz> to 2025-06-06T08:48:03.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs0/index-11--141914621497047119.wt.gz> to 2025-06-06T08:48:03.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy 
<2025-06-06T08:29:58Z/rs0/index-13--141914621497047119.wt.gz> to 2025-06-06T08:48:03.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs0/index-15--141914621497047119.wt.gz> to 2025-06-06T08:48:03.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs0/index-20--141914621497047119.wt.gz> to 2025-06-06T08:48:03.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs0/index-22--141914621497047119.wt.gz> to 2025-06-06T08:48:03.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs0/index-23--141914621497047119.wt.gz> to 2025-06-06T08:48:03.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs0/index-25--141914621497047119.wt.gz> to 2025-06-06T08:48:03.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs0/index-26--141914621497047119.wt.gz> to 2025-06-06T08:48:03.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs0/index-29--141914621497047119.wt.gz> to 2025-06-06T08:48:03.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs0/index-3--141914621497047119.wt.gz> to 2025-06-06T08:48:03.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs0/index-30--141914621497047119.wt.gz> to 2025-06-06T08:48:03.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs0/index-32--141914621497047119.wt.gz> to 2025-06-06T08:48:03.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs0/index-33--141914621497047119.wt.gz> to 2025-06-06T08:48:03.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs0/index-35--141914621497047119.wt.gz> to 2025-06-06T08:48:03.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs0/index-36--141914621497047119.wt.gz> to 2025-06-06T08:48:03.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs0/index-38--141914621497047119.wt.gz> to 2025-06-06T08:48:03.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs0/index-39--141914621497047119.wt.gz> to 2025-06-06T08:48:03.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs0/index-41--141914621497047119.wt.gz> to 2025-06-06T08:48:03.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs0/index-42--141914621497047119.wt.gz> to 2025-06-06T08:48:03.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs0/index-44--141914621497047119.wt.gz> to 2025-06-06T08:48:03.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs0/index-45--141914621497047119.wt.gz> to 2025-06-06T08:48:03.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs0/index-47--141914621497047119.wt.gz> to 2025-06-06T08:48:03.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs0/index-48--141914621497047119.wt.gz> to 2025-06-06T08:48:03.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs0/index-5--141914621497047119.wt.gz> to 2025-06-06T08:48:03.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs0/index-50--141914621497047119.wt.gz> to 2025-06-06T08:48:03.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs0/index-51--141914621497047119.wt.gz> to 2025-06-06T08:48:03.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy 
<2025-06-06T08:29:58Z/rs0/index-53--141914621497047119.wt.gz> to 2025-06-06T08:48:03.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs0/index-55--141914621497047119.wt.gz> to 2025-06-06T08:48:03.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs0/index-57--141914621497047119.wt.gz> to 2025-06-06T08:48:04.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs0/index-59--141914621497047119.wt.gz> to 2025-06-06T08:48:04.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs0/index-61--141914621497047119.wt.gz> to 2025-06-06T08:48:04.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs0/index-63--141914621497047119.wt.gz> to 2025-06-06T08:48:04.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs0/index-65--141914621497047119.wt.gz> to 2025-06-06T08:48:04.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs0/index-67--141914621497047119.wt.gz> to 2025-06-06T08:48:04.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs0/index-69--141914621497047119.wt.gz> to 2025-06-06T08:48:04.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs0/index-7--141914621497047119.wt.gz> to 2025-06-06T08:48:04.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs0/index-71--141914621497047119.wt.gz> to 2025-06-06T08:48:04.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs0/index-72--141914621497047119.wt.gz> to 2025-06-06T08:48:04.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs0/index-73--141914621497047119.wt.gz> to 2025-06-06T08:48:04.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs0/index-9--141914621497047119.wt.gz> to 2025-06-06T08:48:04.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs0/sizeStorer.wt.gz> to 2025-06-06T08:48:04.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs0/WiredTiger.backup.gz> to 2025-06-06T08:48:04.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs0/WiredTiger.gz> to 2025-06-06T08:48:04.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs0/storage.bson.gz.0-114> to 2025-06-06T08:48:04.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs0/journal/WiredTigerLog.0000000002.gz> to 2025-06-06T08:48:04.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs0/key.db/WiredTigerLog.0000000002.gz> to 2025-06-06T08:48:04.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs0/key.db/WiredTigerLog.0000000001.gz> to 2025-06-06T08:48:04.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] download stat: buf 1073741824, arena 268435456, span 33554432, spanNum 8, cc 4, [{1 0} {1 0} {1 0} {1 0}] 2025-06-06T08:48:04.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] preparing data 2025-06-06T08:48:05.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] oplogTruncateAfterPoint: {1749198601 5} 2025-06-06T08:48:06.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] recovering oplog as standalone 2025-06-06T08:48:09.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] replaying pitr oplog 2025-06-06T08:48:14.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] uploading ".pbm.restore/2025-06-06T08:46:49.801096496Z/rs.rs0/rs.txn" 
[size hint: 2 (2.00B); part size: 10485760 (10.00MB)] 2025-06-06T08:48:16.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] clean-up and reset replicaset config 2025-06-06T08:48:18.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] restore on node succeed 2025-06-06T08:48:18.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] moving to state done 2025-06-06T08:48:18.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] uploading ".pbm.restore/2025-06-06T08:46:49.801096496Z/rs.rs0/node.some-name-rs0-0.some-name-rs0.pitr-physical-5149.svc.cluster.local:27017.done" [size hint: 10 (10.00B); part size: 10485760 (10.00MB)] 2025-06-06T08:48:18.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] waiting for `done` status in rs map[.pbm.restore/2025-06-06T08:46:49.801096496Z/rs.rs0/node.some-name-rs0-0.some-name-rs0.pitr-physical-5149.svc.cluster.local:27017:{} .pbm.restore/2025-06-06T08:46:49.801096496Z/rs.rs0/node.some-name-rs0-1.some-name-rs0.pitr-physical-5149.svc.cluster.local:27017:{} .pbm.restore/2025-06-06T08:46:49.801096496Z/rs.rs0/node.some-name-rs0-2.some-name-rs0.pitr-physical-5149.svc.cluster.local:27017:{}] 2025-06-06T08:48:23.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] uploading ".pbm.restore/2025-06-06T08:46:49.801096496Z/rs.rs0/rs.done" [size hint: 10 (10.00B); part size: 10485760 (10.00MB)] 2025-06-06T08:48:23.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] waiting for shards map[.pbm.restore/2025-06-06T08:46:49.801096496Z/rs.cfg/rs:{} .pbm.restore/2025-06-06T08:46:49.801096496Z/rs.rs0/rs:{} .pbm.restore/2025-06-06T08:46:49.801096496Z/rs.rs1/rs:{} .pbm.restore/2025-06-06T08:46:49.801096496Z/rs.rs2/rs:{}] 2025-06-06T08:48:50.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] uploading ".pbm.restore/2025-06-06T08:46:49.801096496Z/rs.rs0/node.some-name-rs0-0.some-name-rs0.pitr-physical-5149.svc.cluster.local:27017.hb" [size hint: 10 (10.00B); part size: 10485760 (10.00MB)] 2025-06-06T08:48:50.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] uploading ".pbm.restore/2025-06-06T08:46:49.801096496Z/rs.rs0/rs.hb" [size hint: 10 (10.00B); part size: 10485760 (10.00MB)] 2025-06-06T08:48:50.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] uploading ".pbm.restore/2025-06-06T08:46:49.801096496Z/cluster.hb" [size hint: 10 (10.00B); part size: 10485760 (10.00MB)] 2025-06-06T08:49:08.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] uploading ".pbm.restore/2025-06-06T08:46:49.801096496Z/cluster.done" [size hint: 10 (10.00B); part size: 10485760 (10.00MB)] 2025-06-06T08:49:08.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] waiting for cluster 2025-06-06T08:49:13.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] converged to state done 2025-06-06T08:49:13.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] uploading ".pbm.restore/2025-06-06T08:46:49.801096496Z/rs.rs0/stat.some-name-rs0-0.some-name-rs0.pitr-physical-5149.svc.cluster.local:27017" [size hint: 73 (73.00B); part size: 10485760 (10.00MB)] 2025-06-06T08:49:13.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] writing restore meta 2025-06-06T08:49:14.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] uploading ".pbm.restore/2025-06-06T08:46:49.801096496Z.json" [size hint: 9216 (9.00KB); part size: 10485760 (10.00MB)] 2025-06-06T08:49:14.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] rm tmp conf 2025-06-06T08:49:14.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] rm tmp logs 2025-06-06T08:49:14.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] recovery successfully finished 
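-----------------------------------------------------------------------------------
[editor's note] how the PITR target above was chosen
-----------------------------------------------------------------------------------
"recovery successfully finished" ends the node-level restore; the
connection-refused and server-selection errors that follow are expected, since
every mongod was stopped for the physical restore and pbm-agent simply restarts
until it can reconnect. The restore target itself (<1749198759,0>, i.e.
2025-06-06 08:32:39 UTC) was picked by the wait loops earlier in this log,
which block until the newest PITR oplog chunk covers the last test write. The
function names and the jq path below are verbatim from the trace; the loop
body is a simplified sketch of the harness's retry logic:

    get_latest_oplog_chunk_ts() {
        local cluster=$1
        kubectl exec "${cluster}-rs0-0" -c backup-agent -- pbm status -o json \
            | jq '.backups.pitrChunks.pitrChunks | last | .range.end'
    }

    format_date() {
        local timestamp=$1
        TZ=UTC /usr/bin/date -d@"${timestamp}" '+%Y-%m-%d %H:%M:%S'
    }

    last_write=$(date +%s)    # taken right after the last test insert
    last_chunk=$(get_latest_oplog_chunk_ts some-name)
    retries=0
    while [[ ${last_chunk} -le ${last_write} ]]; do
        if [[ $((++retries)) -gt 30 ]]; then
            echo "timed out waiting for PITR chunk coverage" >&2
            exit 1
        fi
        echo "Waiting for last oplog chunk ($(format_date "${last_chunk}")) to be greater than last write ($(format_date "${last_write}"))"
        sleep 10
        last_chunk=$(get_latest_oplog_chunk_ts some-name)
    done
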
2025-06-06T08:49:14.000+0000 I change stream was closed 2025-06-06T08:49:14.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] hearbeats stopped 2025-06-06T08:49:14.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] uploading ".pbm.restore/2025-06-06T08:46:49.801096496Z/rs.rs0/log/some-name-rs0-0.some-name-rs0.pitr-physical-5149.svc.cluster.local:27017.0.log" [size hint: -1 (unknown); part size: 10485760 (10.00MB)] 2025-06-06T08:49:14.000+0000 D [agentCheckup] deleting agent status 2025-06-06T08:49:14.000+0000 I Exit: 2025-06-06T08:49:14.000+0000 E [pitr] init: get conf: get: server selection error: server selection timeout, current topology: { Type: ReplicaSetNoPrimary, Servers: [{ Addr: some-name-cfg-0.some-name-cfg.pitr-physical-5149.svc.cluster.local:27017, Type: Unknown, Last error: dial tcp 10.141.90.80:27017: connect: connection refused }, { Addr: some-name-cfg-1.some-name-cfg.pitr-physical-5149.svc.cluster.local:27017, Type: Unknown, Last error: dial tcp 10.141.89.3:27017: connect: connection refused }, { Addr: some-name-cfg-2.some-name-cfg.pitr-physical-5149.svc.cluster.local:27017, Type: Unknown, Last error: dial tcp 10.141.88.78:27017: connect: connection refused }, ] } 2025/06/06 08:49:14 [entrypoint] `pbm-agent` exited with code 1 2025/06/06 08:49:14 [entrypoint] restart in 5 sec 2025/06/06 08:49:19 [entrypoint] starting `pbm-agent` 2025-06-06T08:49:49.000+0000 E Exit: connect to PBM: create mongo connection: ping: server selection error: server selection timeout, current topology: { Type: Unknown, Servers: [{ Addr: localhost:27017, Type: Unknown, Last error: dial tcp 127.0.0.1:27017: connect: connection refused }, ] } 2025/06/06 08:49:49 [entrypoint] `pbm-agent` exited with code 1 2025/06/06 08:49:49 [entrypoint] restart in 5 sec 2025/06/06 08:49:54 [entrypoint] starting `pbm-agent` + cat /tmp/tmp.yWANqCL6Bx Defaulted container "mongod" out of: mongod, mongo-init (init), pbm-init (init) + rm /tmp/tmp.5w4rukfdDA /tmp/tmp.yWANqCL6Bx + return 0 + for pod in '$(get_mongod_pods ${cluster})' + desc 'pbm-agent logs from some-name-rs0-1' + set +o xtrace ----------------------------------------------------------------------------------- pbm-agent logs from some-name-rs0-1 ----------------------------------------------------------------------------------- + kubectl_bin exec some-name-rs0-1 -- cat /tmp/pbm-agent.log ++ mktemp + local LAST_OUT=/tmp/tmp.0OdsaBlf1l ++ mktemp + local LAST_ERR=/tmp/tmp.D5MnMPVyeu + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec some-name-rs0-1 -- cat /tmp/pbm-agent.log + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.0OdsaBlf1l 2025/06/06 08:40:34 [entrypoint] starting `pbm-agent` 2025-06-06T08:40:36.000+0000 I [pbm-agent ASCII-art startup banner and Percona Squad promo elided]
2025-06-06T08:40:36.000+0000 I log options: log-path=/dev/stderr, log-level:D, log-json:false 2025-06-06T08:40:36.000+0000 I pbm-agent: Version: 2.9.1 Platform: linux/amd64 GitCommit: 1f8d60145ed5e7b834c6d7c5318446071bc04ecb GitBranch: release-2.9.1 BuildTime: 2025-03-25_11:35_UTC GoVersion: go1.22.8 2025-06-06T08:40:36.000+0000 I starting PITR routine 2025-06-06T08:40:36.000+0000 I node: rs0/some-name-rs0-1.some-name-rs0.pitr-physical-5149.svc.cluster.local:27017 2025-06-06T08:40:36.000+0000 I conn level ReadConcern: majority; WriteConcern: majority 2025-06-06T08:40:36.000+0000 I listening for the commands 2025-06-06T08:46:49.000+0000 I got command restore [name: 2025-06-06T08:46:49.801096496Z, snapshot: 2025-06-06T08:29:58Z point-in-time: <1749198759,0>] , opid: 6842aaf9fb2c99b9a202e303 2025-06-06T08:46:49.000+0000 I got epoch {1749198979 5} 2025-06-06T08:46:49.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] to time: 2025-06-06T08:32:39Z 2025-06-06T08:46:49.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] backup: 2025-06-06T08:29:58Z 2025-06-06T08:46:49.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] recovery started 2025-06-06T08:46:49.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] port: 28128 2025-06-06T08:46:49.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] uploading ".pbm.restore/2025-06-06T08:46:49.801096496Z/rs.rs0/node.some-name-rs0-1.some-name-rs0.pitr-physical-5149.svc.cluster.local:27017.hb" [size hint: 10 (10.00B); part size: 10485760 (10.00MB)] 2025-06-06T08:46:49.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] uploading ".pbm.restore/2025-06-06T08:46:49.801096496Z/rs.rs0/rs.hb" [size hint: 10 (10.00B); part size: 10485760 (10.00MB)] 2025-06-06T08:46:49.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] uploading ".pbm.restore/2025-06-06T08:46:49.801096496Z/cluster.hb" [size hint: 10 (10.00B); part size: 10485760 (10.00MB)] 2025-06-06T08:46:50.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] mongod binary: mongod, version: v7.0.18-11 2025-06-06T08:46:50.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] moving to state starting 2025-06-06T08:46:50.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] uploading ".pbm.restore/2025-06-06T08:46:49.801096496Z/rs.rs0/node.some-name-rs0-1.some-name-rs0.pitr-physical-5149.svc.cluster.local:27017.starting" [size hint: 10 (10.00B); part size: 10485760 (10.00MB)] 2025-06-06T08:46:50.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] waiting for cluster 2025-06-06T08:47:05.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] converged to state starting 2025-06-06T08:47:05.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] starting 2025-06-06T08:47:05.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] moving to state running 2025-06-06T08:47:05.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] uploading ".pbm.restore/2025-06-06T08:46:49.801096496Z/rs.rs0/node.some-name-rs0-1.some-name-rs0.pitr-physical-5149.svc.cluster.local:27017.running" [size hint: 10 (10.00B); part size: 10485760 (10.00MB)] 2025-06-06T08:47:05.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] waiting for cluster 2025-06-06T08:47:20.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] converged to state running 2025-06-06T08:47:20.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] send to stopAgent chan 2025-06-06T08:47:20.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] stop agents heartbeats 2025-06-06T08:47:20.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] stopping mongod and
flushing old data 2025-06-06T08:47:20.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] shutdown server 2025-06-06T08:47:36.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] waiting for the node to shutdown 2025-06-06T08:47:37.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove old data 2025-06-06T08:47:37.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/mongod.lock 2025-06-06T08:47:37.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/journal 2025-06-06T08:47:37.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/storage.bson 2025-06-06T08:47:37.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/collection-8--7993108142314022865.wt 2025-06-06T08:47:37.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/collection-6--7993108142314022865.wt 2025-06-06T08:47:37.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/index-61--7993108142314022865.wt 2025-06-06T08:47:37.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/collection-37--7993108142314022865.wt 2025-06-06T08:47:37.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/WiredTigerHS.wt 2025-06-06T08:47:37.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/lost+found 2025-06-06T08:47:37.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/collection-62--7993108142314022865.wt 2025-06-06T08:47:37.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/diagnostic.data 2025-06-06T08:47:37.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/sizeStorer.wt 2025-06-06T08:47:37.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/index-41--7993108142314022865.wt 2025-06-06T08:47:37.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/collection-49--7993108142314022865.wt 2025-06-06T08:47:37.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/collection-54--7993108142314022865.wt 2025-06-06T08:47:37.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/collection-2--7993108142314022865.wt 2025-06-06T08:47:37.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/index-35--7993108142314022865.wt 2025-06-06T08:47:37.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/index-32--7993108142314022865.wt 2025-06-06T08:47:37.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/index-67--7993108142314022865.wt 2025-06-06T08:47:37.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/WiredTiger.lock 2025-06-06T08:47:37.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/collection-24--7993108142314022865.wt 2025-06-06T08:47:37.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/collection-40--7993108142314022865.wt 2025-06-06T08:47:37.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/collection-21--7993108142314022865.wt 2025-06-06T08:47:37.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/collection-66--7993108142314022865.wt 2025-06-06T08:47:37.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/index-65--7993108142314022865.wt 2025-06-06T08:47:37.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/index-20--7993108142314022865.wt 2025-06-06T08:47:37.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/index-48--7993108142314022865.wt 2025-06-06T08:47:37.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove 
/data/db/index-50--7993108142314022865.wt 2025-06-06T08:47:37.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/index-11--7993108142314022865.wt 2025-06-06T08:47:37.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/collection-27--7993108142314022865.wt 2025-06-06T08:47:37.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/index-29--7993108142314022865.wt 2025-06-06T08:47:37.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/_mdb_catalog.wt 2025-06-06T08:47:37.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/index-73--7993108142314022865.wt 2025-06-06T08:47:37.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/collection-68--7993108142314022865.wt 2025-06-06T08:47:37.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/collection-58--7993108142314022865.wt 2025-06-06T08:47:37.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/index-51--7993108142314022865.wt 2025-06-06T08:47:37.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/index-36--7993108142314022865.wt 2025-06-06T08:47:37.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/index-63--7993108142314022865.wt 2025-06-06T08:47:37.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/collection-19--7993108142314022865.wt 2025-06-06T08:47:37.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/index-44--7993108142314022865.wt 2025-06-06T08:47:37.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/collection-4--7993108142314022865.wt 2025-06-06T08:47:37.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/index-23--7993108142314022865.wt 2025-06-06T08:47:37.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/index-53--7993108142314022865.wt 2025-06-06T08:47:37.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/psmdb_telemetry.data 2025-06-06T08:47:37.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/collection-43--7993108142314022865.wt 2025-06-06T08:47:37.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/index-25--7993108142314022865.wt 2025-06-06T08:47:37.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/key.db 2025-06-06T08:47:37.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/collection-12--7993108142314022865.wt 2025-06-06T08:47:37.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/index-3--7993108142314022865.wt 2025-06-06T08:47:37.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/WiredTiger.wt 2025-06-06T08:47:37.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/index-1--7993108142314022865.wt 2025-06-06T08:47:37.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/index-7--7993108142314022865.wt 2025-06-06T08:47:37.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/collection-52--7993108142314022865.wt 2025-06-06T08:47:37.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/index-30--7993108142314022865.wt 2025-06-06T08:47:37.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/WiredTiger.turtle 2025-06-06T08:47:37.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/collection-56--7993108142314022865.wt 2025-06-06T08:47:37.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/index-72--7993108142314022865.wt 2025-06-06T08:47:37.000+0000 D 
[restore/2025-06-06T08:46:49.801096496Z] remove /data/db/index-59--7993108142314022865.wt 2025-06-06T08:47:37.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/WiredTiger 2025-06-06T08:47:37.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/index-33--7993108142314022865.wt 2025-06-06T08:47:37.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/collection-64--7993108142314022865.wt 2025-06-06T08:47:37.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/index-47--7993108142314022865.wt 2025-06-06T08:47:37.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/index-26--7993108142314022865.wt 2025-06-06T08:47:37.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/collection-34--7993108142314022865.wt 2025-06-06T08:47:37.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/collection-46--7993108142314022865.wt 2025-06-06T08:47:37.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/index-13--7993108142314022865.wt 2025-06-06T08:47:37.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/index-69--7993108142314022865.wt 2025-06-06T08:47:37.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/index-38--7993108142314022865.wt 2025-06-06T08:47:37.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/collection-10--7993108142314022865.wt 2025-06-06T08:47:37.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/collection-18--7993108142314022865.wt 2025-06-06T08:47:37.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/index-15--7993108142314022865.wt 2025-06-06T08:47:37.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/index-55--7993108142314022865.wt 2025-06-06T08:47:37.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/index-5--7993108142314022865.wt 2025-06-06T08:47:37.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/index-57--7993108142314022865.wt 2025-06-06T08:47:37.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/collection-60--7993108142314022865.wt 2025-06-06T08:47:37.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/index-22--7993108142314022865.wt 2025-06-06T08:47:37.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/index-42--7993108142314022865.wt 2025-06-06T08:47:37.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/collection-31--7993108142314022865.wt 2025-06-06T08:47:37.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/collection-28--7993108142314022865.wt 2025-06-06T08:47:37.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/index-39--7993108142314022865.wt 2025-06-06T08:47:37.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/index-71--7993108142314022865.wt 2025-06-06T08:47:37.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/collection-14--7993108142314022865.wt 2025-06-06T08:47:37.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/index-9--7993108142314022865.wt 2025-06-06T08:47:37.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/index-45--7993108142314022865.wt 2025-06-06T08:47:37.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/mongod-data 2025-06-06T08:47:37.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/collection-0--7993108142314022865.wt 2025-06-06T08:47:37.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] 
remove /data/db/collection-70--7993108142314022865.wt 2025-06-06T08:47:37.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copying backup data 2025-06-06T08:47:37.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] download max buf 1073741824 (arena 268435456, span 33554432, concurrency 4) 2025-06-06T08:47:37.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs0/key.db/WiredTigerLog.0000000001.gz> to 2025-06-06T08:47:37.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs0/key.db/WiredTigerHS.wt.gz> to 2025-06-06T08:47:37.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs0/key.db/key.wt.gz> to 2025-06-06T08:47:37.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs0/key.db/parameters.wt.gz> to 2025-06-06T08:47:37.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs0/key.db/WiredTiger.backup.gz> to 2025-06-06T08:47:37.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs0/key.db/WiredTiger.gz> to 2025-06-06T08:47:37.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs0/journal/WiredTigerLog.0000000001.gz> to 2025-06-06T08:47:37.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs0/WiredTigerHS.wt.gz> to 2025-06-06T08:47:37.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs0/_mdb_catalog.wt.gz> to 2025-06-06T08:47:37.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs0/collection-0--141914621497047119.wt.gz> to 2025-06-06T08:47:37.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs0/collection-10--141914621497047119.wt.gz> to 2025-06-06T08:47:37.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs0/collection-12--141914621497047119.wt.gz> to 2025-06-06T08:47:37.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs0/collection-14--141914621497047119.wt.gz> to 2025-06-06T08:47:37.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs0/collection-18--141914621497047119.wt.gz> to 2025-06-06T08:47:37.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs0/collection-19--141914621497047119.wt.gz> to 2025-06-06T08:47:37.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs0/collection-2--141914621497047119.wt.gz> to 2025-06-06T08:47:37.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs0/collection-21--141914621497047119.wt.gz> to 2025-06-06T08:47:37.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs0/collection-24--141914621497047119.wt.gz> to 2025-06-06T08:47:37.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs0/collection-27--141914621497047119.wt.gz> to 2025-06-06T08:47:37.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs0/collection-28--141914621497047119.wt.gz> to 2025-06-06T08:47:37.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs0/collection-31--141914621497047119.wt.gz> to 2025-06-06T08:47:37.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs0/collection-34--141914621497047119.wt.gz> to 2025-06-06T08:47:37.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs0/collection-37--141914621497047119.wt.gz> to 
2025-06-06T08:47:37.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs0/collection-4--141914621497047119.wt.gz> to 2025-06-06T08:47:37.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs0/collection-40--141914621497047119.wt.gz> to 2025-06-06T08:47:37.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs0/collection-43--141914621497047119.wt.gz> to 2025-06-06T08:47:37.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs0/collection-46--141914621497047119.wt.gz> to 2025-06-06T08:47:37.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs0/collection-49--141914621497047119.wt.gz> to 2025-06-06T08:47:37.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs0/collection-52--141914621497047119.wt.gz> to 2025-06-06T08:47:37.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs0/collection-54--141914621497047119.wt.gz> to 2025-06-06T08:47:37.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs0/collection-56--141914621497047119.wt.gz> to 2025-06-06T08:47:37.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs0/collection-58--141914621497047119.wt.gz> to 2025-06-06T08:47:37.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs0/collection-6--141914621497047119.wt.gz> to 2025-06-06T08:47:37.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs0/collection-60--141914621497047119.wt.gz> to 2025-06-06T08:47:37.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs0/collection-62--141914621497047119.wt.gz> to 2025-06-06T08:47:37.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs0/collection-64--141914621497047119.wt.gz> to 2025-06-06T08:47:37.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs0/collection-66--141914621497047119.wt.gz> to 2025-06-06T08:47:37.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs0/collection-68--141914621497047119.wt.gz> to 2025-06-06T08:47:37.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs0/collection-70--141914621497047119.wt.gz> to 2025-06-06T08:47:37.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs0/collection-8--141914621497047119.wt.gz> to 2025-06-06T08:47:37.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs0/index-1--141914621497047119.wt.gz> to 2025-06-06T08:47:37.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs0/index-11--141914621497047119.wt.gz> to 2025-06-06T08:47:37.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs0/index-13--141914621497047119.wt.gz> to 2025-06-06T08:47:37.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs0/index-15--141914621497047119.wt.gz> to 2025-06-06T08:47:37.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs0/index-20--141914621497047119.wt.gz> to 2025-06-06T08:47:37.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs0/index-22--141914621497047119.wt.gz> to 2025-06-06T08:47:37.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs0/index-23--141914621497047119.wt.gz> to 2025-06-06T08:47:37.000+0000 I 
[restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs0/index-25--141914621497047119.wt.gz> to 2025-06-06T08:47:37.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs0/index-26--141914621497047119.wt.gz> to 2025-06-06T08:47:37.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs0/index-29--141914621497047119.wt.gz> to 2025-06-06T08:47:37.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs0/index-3--141914621497047119.wt.gz> to 2025-06-06T08:47:37.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs0/index-30--141914621497047119.wt.gz> to 2025-06-06T08:47:37.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs0/index-32--141914621497047119.wt.gz> to 2025-06-06T08:47:37.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs0/index-33--141914621497047119.wt.gz> to 2025-06-06T08:47:37.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs0/index-35--141914621497047119.wt.gz> to 2025-06-06T08:47:37.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs0/index-36--141914621497047119.wt.gz> to 2025-06-06T08:47:37.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs0/index-38--141914621497047119.wt.gz> to 2025-06-06T08:47:37.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs0/index-39--141914621497047119.wt.gz> to 2025-06-06T08:47:37.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs0/index-41--141914621497047119.wt.gz> to 2025-06-06T08:47:37.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs0/index-42--141914621497047119.wt.gz> to 2025-06-06T08:47:37.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs0/index-44--141914621497047119.wt.gz> to 2025-06-06T08:47:37.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs0/index-45--141914621497047119.wt.gz> to 2025-06-06T08:47:37.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs0/index-47--141914621497047119.wt.gz> to 2025-06-06T08:47:37.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs0/index-48--141914621497047119.wt.gz> to 2025-06-06T08:47:37.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs0/index-5--141914621497047119.wt.gz> to 2025-06-06T08:47:37.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs0/index-50--141914621497047119.wt.gz> to 2025-06-06T08:47:37.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs0/index-51--141914621497047119.wt.gz> to 2025-06-06T08:47:37.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs0/index-53--141914621497047119.wt.gz> to 2025-06-06T08:47:37.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs0/index-55--141914621497047119.wt.gz> to 2025-06-06T08:47:37.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs0/index-57--141914621497047119.wt.gz> to 2025-06-06T08:47:37.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs0/index-59--141914621497047119.wt.gz> to 2025-06-06T08:47:37.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs0/index-61--141914621497047119.wt.gz> to 2025-06-06T08:47:37.000+0000 I 
[restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs0/index-63--141914621497047119.wt.gz> to 2025-06-06T08:47:37.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs0/index-65--141914621497047119.wt.gz> to 2025-06-06T08:47:37.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs0/index-67--141914621497047119.wt.gz> to 2025-06-06T08:47:37.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs0/index-69--141914621497047119.wt.gz> to 2025-06-06T08:47:37.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs0/index-7--141914621497047119.wt.gz> to 2025-06-06T08:47:37.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs0/index-71--141914621497047119.wt.gz> to 2025-06-06T08:47:37.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs0/index-72--141914621497047119.wt.gz> to 2025-06-06T08:47:37.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs0/index-73--141914621497047119.wt.gz> to 2025-06-06T08:47:37.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs0/index-9--141914621497047119.wt.gz> to 2025-06-06T08:47:37.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs0/sizeStorer.wt.gz> to 2025-06-06T08:47:37.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs0/WiredTiger.backup.gz> to 2025-06-06T08:47:37.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs0/WiredTiger.gz> to 2025-06-06T08:47:37.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs0/storage.bson.gz.0-114> to 2025-06-06T08:47:37.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs0/journal/WiredTigerLog.0000000002.gz> to 2025-06-06T08:47:38.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs0/key.db/WiredTigerLog.0000000002.gz> to 2025-06-06T08:47:38.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs0/key.db/WiredTigerLog.0000000001.gz> to 2025-06-06T08:47:38.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] download stat: buf 1073741824, arena 268435456, span 33554432, spanNum 8, cc 4, [{1 0} {1 0} {1 0} {1 0}] 2025-06-06T08:47:38.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] preparing data 2025-06-06T08:47:41.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] oplogTruncateAfterPoint: {1749198601 5} 2025-06-06T08:47:42.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] recovering oplog as standalone 2025-06-06T08:47:44.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] clean-up and reset replicaset config 2025-06-06T08:47:47.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] restore on node succeed 2025-06-06T08:47:47.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] moving to state done 2025-06-06T08:47:47.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] uploading ".pbm.restore/2025-06-06T08:46:49.801096496Z/rs.rs0/node.some-name-rs0-1.some-name-rs0.pitr-physical-5149.svc.cluster.local:27017.done" [size hint: 10 (10.00B); part size: 10485760 (10.00MB)] 2025-06-06T08:47:47.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] waiting for `done` status in rs map[.pbm.restore/2025-06-06T08:46:49.801096496Z/rs.rs0/node.some-name-rs0-0.some-name-rs0.pitr-physical-5149.svc.cluster.local:27017:{} 
.pbm.restore/2025-06-06T08:46:49.801096496Z/rs.rs0/node.some-name-rs0-1.some-name-rs0.pitr-physical-5149.svc.cluster.local:27017:{} .pbm.restore/2025-06-06T08:46:49.801096496Z/rs.rs0/node.some-name-rs0-2.some-name-rs0.pitr-physical-5149.svc.cluster.local:27017:{}] 2025-06-06T08:48:22.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] uploading ".pbm.restore/2025-06-06T08:46:49.801096496Z/rs.rs0/rs.done" [size hint: 10 (10.00B); part size: 10485760 (10.00MB)] 2025-06-06T08:48:22.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] waiting for shards map[.pbm.restore/2025-06-06T08:46:49.801096496Z/rs.cfg/rs:{} .pbm.restore/2025-06-06T08:46:49.801096496Z/rs.rs0/rs:{} .pbm.restore/2025-06-06T08:46:49.801096496Z/rs.rs1/rs:{} .pbm.restore/2025-06-06T08:46:49.801096496Z/rs.rs2/rs:{}] 2025-06-06T08:48:49.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] uploading ".pbm.restore/2025-06-06T08:46:49.801096496Z/rs.rs0/node.some-name-rs0-1.some-name-rs0.pitr-physical-5149.svc.cluster.local:27017.hb" [size hint: 10 (10.00B); part size: 10485760 (10.00MB)] 2025-06-06T08:48:49.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] uploading ".pbm.restore/2025-06-06T08:46:49.801096496Z/rs.rs0/rs.hb" [size hint: 10 (10.00B); part size: 10485760 (10.00MB)] 2025-06-06T08:48:49.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] uploading ".pbm.restore/2025-06-06T08:46:49.801096496Z/cluster.hb" [size hint: 10 (10.00B); part size: 10485760 (10.00MB)] 2025-06-06T08:49:12.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] uploading ".pbm.restore/2025-06-06T08:46:49.801096496Z/cluster.done" [size hint: 10 (10.00B); part size: 10485760 (10.00MB)] 2025-06-06T08:49:12.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] waiting for cluster 2025-06-06T08:49:17.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] converged to state done 2025-06-06T08:49:17.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] uploading ".pbm.restore/2025-06-06T08:46:49.801096496Z/rs.rs0/stat.some-name-rs0-1.some-name-rs0.pitr-physical-5149.svc.cluster.local:27017" [size hint: 73 (73.00B); part size: 10485760 (10.00MB)] 2025-06-06T08:49:17.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] writing restore meta 2025-06-06T08:49:17.000+0000 W [restore/2025-06-06T08:46:49.801096496Z] meta `.pbm.restore/2025-06-06T08:46:49.801096496Z.json` already exists, trying write done status with '' 2025-06-06T08:49:17.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] rm tmp conf 2025-06-06T08:49:17.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] rm tmp logs 2025-06-06T08:49:17.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] recovery successfully finished 2025-06-06T08:49:17.000+0000 I change stream was closed 2025-06-06T08:49:17.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] hearbeats stopped 2025-06-06T08:49:17.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] uploading ".pbm.restore/2025-06-06T08:46:49.801096496Z/rs.rs0/log/some-name-rs0-1.some-name-rs0.pitr-physical-5149.svc.cluster.local:27017.0.log" [size hint: -1 (unknown); part size: 10485760 (10.00MB)] 2025-06-06T08:49:17.000+0000 D [agentCheckup] deleting agent status 2025-06-06T08:49:17.000+0000 E [pitr] init: get conf: get: server selection error: server selection timeout, current topology: { Type: ReplicaSetNoPrimary, Servers: [{ Addr: some-name-cfg-0.some-name-cfg.pitr-physical-5149.svc.cluster.local:27017, Type: Unknown, Last error: dial tcp 10.141.90.80:27017: connect: connection refused }, { Addr: some-name-cfg-1.some-name-cfg.pitr-physical-5149.svc.cluster.local:27017, 
Type: Unknown, Last error: dial tcp 10.141.89.3:27017: connect: connection refused }, { Addr: some-name-cfg-2.some-name-cfg.pitr-physical-5149.svc.cluster.local:27017, Type: Unknown, Last error: dial tcp 10.141.88.78:27017: connect: connection refused }, ] } 2025-06-06T08:49:17.000+0000 I Exit: 2025/06/06 08:49:17 [entrypoint] `pbm-agent` exited with code 1 2025/06/06 08:49:17 [entrypoint] restart in 5 sec 2025/06/06 08:49:22 [entrypoint] starting `pbm-agent` 2025-06-06T08:49:52.000+0000 E Exit: connect to PBM: create mongo connection: ping: server selection error: server selection timeout, current topology: { Type: Unknown, Servers: [{ Addr: localhost:27017, Type: Unknown, Last error: dial tcp 127.0.0.1:27017: connect: connection refused }, ] } 2025/06/06 08:49:52 [entrypoint] `pbm-agent` exited with code 1 2025/06/06 08:49:52 [entrypoint] restart in 5 sec 2025/06/06 08:49:57 [entrypoint] starting `pbm-agent` + cat /tmp/tmp.D5MnMPVyeu Defaulted container "mongod" out of: mongod, mongo-init (init), pbm-init (init) + rm /tmp/tmp.0OdsaBlf1l /tmp/tmp.D5MnMPVyeu + return 0 + for pod in '$(get_mongod_pods ${cluster})' + desc 'pbm-agent logs from some-name-rs0-2' + set +o xtrace ----------------------------------------------------------------------------------- pbm-agent logs from some-name-rs0-2 ----------------------------------------------------------------------------------- + kubectl_bin exec some-name-rs0-2 -- cat /tmp/pbm-agent.log ++ mktemp + local LAST_OUT=/tmp/tmp.p1OarMYj0w ++ mktemp + local LAST_ERR=/tmp/tmp.ufAmRZGD8R + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec some-name-rs0-2 -- cat /tmp/pbm-agent.log + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.p1OarMYj0w + cat /tmp/tmp.ufAmRZGD8R Defaulted container "mongod" out of: mongod, mongo-init (init), pbm-init (init) error: Internal error occurred: unable to upgrade connection: container not found ("mongod") + sleep 0 + for i in '$(seq 0 2)' + set +e + kubectl exec some-name-rs0-2 -- cat /tmp/pbm-agent.log + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.p1OarMYj0w + cat /tmp/tmp.ufAmRZGD8R Defaulted container "mongod" out of: mongod, mongo-init (init), pbm-init (init) error: Internal error occurred: unable to upgrade connection: container not found ("mongod") + sleep 4 + for i in '$(seq 0 2)' + set +e + kubectl exec some-name-rs0-2 -- cat /tmp/pbm-agent.log + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.p1OarMYj0w + cat /tmp/tmp.ufAmRZGD8R Defaulted container "mongod" out of: mongod, mongo-init (init), pbm-init (init) error: Internal error occurred: unable to upgrade connection: container not found ("mongod") + sleep 8 + cat /tmp/tmp.p1OarMYj0w + cat /tmp/tmp.ufAmRZGD8R Defaulted container "mongod" out of: mongod, mongo-init (init), pbm-init (init) error: Internal error occurred: unable to upgrade connection: container not found ("mongod") + rm /tmp/tmp.p1OarMYj0w /tmp/tmp.ufAmRZGD8R + return 1 + echo 'failed to get /tmp/pbm-agent.log from some-name-rs0-2' failed to get /tmp/pbm-agent.log from some-name-rs0-2 + for pod in '$(get_mongod_pods ${cluster})' + desc 'pbm-agent logs from some-name-rs1-0' + set +o xtrace ----------------------------------------------------------------------------------- pbm-agent logs from some-name-rs1-0 ----------------------------------------------------------------------------------- + kubectl_bin exec some-name-rs1-0 -- cat /tmp/pbm-agent.log ++ mktemp + local LAST_OUT=/tmp/tmp.qaGDstNsFg ++ 
mktemp + local LAST_ERR=/tmp/tmp.lWoKQJbDR1 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec some-name-rs1-0 -- cat /tmp/pbm-agent.log + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.qaGDstNsFg + cat /tmp/tmp.lWoKQJbDR1 Defaulted container "mongod" out of: mongod, mongo-init (init), pbm-init (init) error: Internal error occurred: unable to upgrade connection: container not found ("mongod") + sleep 0 + for i in '$(seq 0 2)' + set +e + kubectl exec some-name-rs1-0 -- cat /tmp/pbm-agent.log + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.qaGDstNsFg + cat /tmp/tmp.lWoKQJbDR1 Defaulted container "mongod" out of: mongod, mongo-init (init), pbm-init (init) error: Internal error occurred: unable to upgrade connection: container not found ("mongod") + sleep 4 + for i in '$(seq 0 2)' + set +e + kubectl exec some-name-rs1-0 -- cat /tmp/pbm-agent.log + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.qaGDstNsFg + cat /tmp/tmp.lWoKQJbDR1 Defaulted container "mongod" out of: mongod, mongo-init (init), pbm-init (init) error: Internal error occurred: unable to upgrade connection: container not found ("mongod") + sleep 8 + cat /tmp/tmp.qaGDstNsFg + cat /tmp/tmp.lWoKQJbDR1 Defaulted container "mongod" out of: mongod, mongo-init (init), pbm-init (init) error: Internal error occurred: unable to upgrade connection: container not found ("mongod") + rm /tmp/tmp.qaGDstNsFg /tmp/tmp.lWoKQJbDR1 + return 1 + echo 'failed to get /tmp/pbm-agent.log from some-name-rs1-0' failed to get /tmp/pbm-agent.log from some-name-rs1-0 + for pod in '$(get_mongod_pods ${cluster})' + desc 'pbm-agent logs from some-name-rs1-1' + set +o xtrace ----------------------------------------------------------------------------------- pbm-agent logs from some-name-rs1-1 ----------------------------------------------------------------------------------- + kubectl_bin exec some-name-rs1-1 -- cat /tmp/pbm-agent.log ++ mktemp + local LAST_OUT=/tmp/tmp.PHm7PPll6F ++ mktemp + local LAST_ERR=/tmp/tmp.MzhGIFRPS8 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec some-name-rs1-1 -- cat /tmp/pbm-agent.log + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.PHm7PPll6F 2025/06/06 08:39:35 [entrypoint] starting `pbm-agent` 2025-06-06T08:39:49.000+0000 I [pbm-agent startup banner: "Percona Squad" ASCII art] Join @ squad.percona.com/mongodb ** Join Percona Squad! ** Participate in monthly SWAG raffles, get early access to new product features, invite-only "ask me anything" sessions with database performance experts. Interested?
Fill in the form at squad.percona.com/mongodb 2025-06-06T08:39:49.000+0000 I log options: log-path=/dev/stderr, log-level:D, log-json:false 2025-06-06T08:39:49.000+0000 I pbm-agent: Version: 2.9.1 Platform: linux/amd64 GitCommit: 1f8d60145ed5e7b834c6d7c5318446071bc04ecb GitBranch: release-2.9.1 BuildTime: 2025-03-25_11:35_UTC GoVersion: go1.22.8 2025-06-06T08:39:49.000+0000 I starting PITR routine 2025-06-06T08:39:49.000+0000 I node: rs1/some-name-rs1-1.some-name-rs1.pitr-physical-5149.svc.cluster.local:27017 2025-06-06T08:39:49.000+0000 I conn level ReadConcern: majority; WriteConcern: majority 2025-06-06T08:39:49.000+0000 I listening for the commands 2025-06-06T08:46:50.000+0000 I got command restore [name: 2025-06-06T08:46:49.801096496Z, snapshot: 2025-06-06T08:29:58Z point-in-time: <1749198759,0>] , opid: 6842aaf9fb2c99b9a202e303 2025-06-06T08:46:50.000+0000 I got epoch {1749199610 54} 2025-06-06T08:46:50.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] to time: 2025-06-06T08:32:39Z 2025-06-06T08:46:50.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] backup: 2025-06-06T08:29:58Z 2025-06-06T08:46:50.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] recovery started 2025-06-06T08:46:50.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] port: 27753 2025-06-06T08:46:50.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] uploading ".pbm.restore/2025-06-06T08:46:49.801096496Z/rs.rs1/node.some-name-rs1-1.some-name-rs1.pitr-physical-5149.svc.cluster.local:27017.hb" [size hint: 10 (10.00B); part size: 10485760 (10.00MB)] 2025-06-06T08:46:50.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] uploading ".pbm.restore/2025-06-06T08:46:49.801096496Z/rs.rs1/rs.hb" [size hint: 10 (10.00B); part size: 10485760 (10.00MB)] 2025-06-06T08:46:50.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] uploading ".pbm.restore/2025-06-06T08:46:49.801096496Z/cluster.hb" [size hint: 10 (10.00B); part size: 10485760 (10.00MB)] 2025-06-06T08:46:50.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] mongod binary: mongod, version: v7.0.18-11 2025-06-06T08:46:50.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] moving to state starting 2025-06-06T08:46:50.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] uploading ".pbm.restore/2025-06-06T08:46:49.801096496Z/rs.rs1/node.some-name-rs1-1.some-name-rs1.pitr-physical-5149.svc.cluster.local:27017.starting" [size hint: 10 (10.00B); part size: 10485760 (10.00MB)] 2025-06-06T08:46:50.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] waiting for cluster 2025-06-06T08:47:05.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] converged to state starting 2025-06-06T08:47:05.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] starting 2025-06-06T08:47:05.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] moving to state running 2025-06-06T08:47:05.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] uploading ".pbm.restore/2025-06-06T08:46:49.801096496Z/rs.rs1/node.some-name-rs1-1.some-name-rs1.pitr-physical-5149.svc.cluster.local:27017.running" [size hint: 10 (10.00B); part size: 10485760 (10.00MB)] 2025-06-06T08:47:05.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] waiting for cluster 2025-06-06T08:47:20.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] converged to state running 2025-06-06T08:47:20.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] send to stopAgent chan 2025-06-06T08:47:21.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] stop agents heartbeats 2025-06-06T08:47:21.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] stopping mongod and 
flushing old data 2025-06-06T08:47:21.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] shutdown server 2025-06-06T08:47:37.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] waiting for the node to shutdown 2025-06-06T08:47:38.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove old data 2025-06-06T08:47:38.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/collection-12-778333958477132706.wt 2025-06-06T08:47:38.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/psmdb_telemetry.data 2025-06-06T08:47:38.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/collection-51-778333958477132706.wt 2025-06-06T08:47:38.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/index-33-778333958477132706.wt 2025-06-06T08:47:38.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/index-55-778333958477132706.wt 2025-06-06T08:47:38.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/collection-18-778333958477132706.wt 2025-06-06T08:47:38.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/_mdb_catalog.wt 2025-06-06T08:47:38.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/WiredTigerHS.wt 2025-06-06T08:47:38.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/index-44-778333958477132706.wt 2025-06-06T08:47:38.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/collection-66-778333958477132706.wt 2025-06-06T08:47:38.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/collection-24-778333958477132706.wt 2025-06-06T08:47:38.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/index-61-778333958477132706.wt 2025-06-06T08:47:38.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/WiredTiger.turtle 2025-06-06T08:47:38.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/index-9-778333958477132706.wt 2025-06-06T08:47:38.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/collection-28-778333958477132706.wt 2025-06-06T08:47:38.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/index-20-778333958477132706.wt 2025-06-06T08:47:38.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/collection-2-778333958477132706.wt 2025-06-06T08:47:38.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/index-41-778333958477132706.wt 2025-06-06T08:47:38.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/index-11-778333958477132706.wt 2025-06-06T08:47:38.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/index-3-778333958477132706.wt 2025-06-06T08:47:38.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/index-67-778333958477132706.wt 2025-06-06T08:47:38.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/index-65-778333958477132706.wt 2025-06-06T08:47:38.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/mongod-data 2025-06-06T08:47:38.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/index-59-778333958477132706.wt 2025-06-06T08:47:38.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/index-26-778333958477132706.wt 2025-06-06T08:47:38.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/collection-54-778333958477132706.wt 2025-06-06T08:47:38.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/collection-22-778333958477132706.wt 2025-06-06T08:47:38.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] 
remove /data/db/index-5-778333958477132706.wt 2025-06-06T08:47:38.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/index-25-778333958477132706.wt 2025-06-06T08:47:38.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/index-13-778333958477132706.wt 2025-06-06T08:47:38.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/index-7-778333958477132706.wt 2025-06-06T08:47:38.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/index-68-778333958477132706.wt 2025-06-06T08:47:38.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/key.db 2025-06-06T08:47:38.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/journal 2025-06-06T08:47:38.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/WiredTiger.wt 2025-06-06T08:47:38.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/index-21-778333958477132706.wt 2025-06-06T08:47:38.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/index-35-778333958477132706.wt 2025-06-06T08:47:38.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/collection-42-778333958477132706.wt 2025-06-06T08:47:38.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/index-49-778333958477132706.wt 2025-06-06T08:47:38.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/index-57-778333958477132706.wt 2025-06-06T08:47:38.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/index-38-778333958477132706.wt 2025-06-06T08:47:38.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/collection-60-778333958477132706.wt 2025-06-06T08:47:38.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/collection-8-778333958477132706.wt 2025-06-06T08:47:38.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/collection-14-778333958477132706.wt 2025-06-06T08:47:38.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/mongod.lock 2025-06-06T08:47:38.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/collection-0-778333958477132706.wt 2025-06-06T08:47:38.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/storage.bson 2025-06-06T08:47:38.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/index-43-778333958477132706.wt 2025-06-06T08:47:38.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/index-40-778333958477132706.wt 2025-06-06T08:47:38.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/index-50-778333958477132706.wt 2025-06-06T08:47:38.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/collection-62-778333958477132706.wt 2025-06-06T08:47:38.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/collection-58-778333958477132706.wt 2025-06-06T08:47:38.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/collection-39-778333958477132706.wt 2025-06-06T08:47:38.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/collection-19-778333958477132706.wt 2025-06-06T08:47:38.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/collection-4-778333958477132706.wt 2025-06-06T08:47:38.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/collection-36-778333958477132706.wt 2025-06-06T08:47:38.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/index-32-778333958477132706.wt 2025-06-06T08:47:38.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove 
/data/db/index-1-778333958477132706.wt 2025-06-06T08:47:38.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/diagnostic.data 2025-06-06T08:47:38.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/lost+found 2025-06-06T08:47:38.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/index-53-778333958477132706.wt 2025-06-06T08:47:38.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/index-29-778333958477132706.wt 2025-06-06T08:47:38.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/collection-56-778333958477132706.wt 2025-06-06T08:47:38.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/sizeStorer.wt 2025-06-06T08:47:38.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/WiredTiger 2025-06-06T08:47:38.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/index-46-778333958477132706.wt 2025-06-06T08:47:38.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/collection-27-778333958477132706.wt 2025-06-06T08:47:38.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/index-37-778333958477132706.wt 2025-06-06T08:47:38.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/index-15-778333958477132706.wt 2025-06-06T08:47:38.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/collection-34-778333958477132706.wt 2025-06-06T08:47:38.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/index-30-778333958477132706.wt 2025-06-06T08:47:38.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/index-63-778333958477132706.wt 2025-06-06T08:47:38.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/collection-45-778333958477132706.wt 2025-06-06T08:47:38.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/collection-31-778333958477132706.wt 2025-06-06T08:47:38.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/WiredTiger.lock 2025-06-06T08:47:38.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/index-23-778333958477132706.wt 2025-06-06T08:47:38.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/collection-6-778333958477132706.wt 2025-06-06T08:47:38.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/index-52-778333958477132706.wt 2025-06-06T08:47:38.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/collection-10-778333958477132706.wt 2025-06-06T08:47:38.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/collection-64-778333958477132706.wt 2025-06-06T08:47:38.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/index-47-778333958477132706.wt 2025-06-06T08:47:38.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/collection-48-778333958477132706.wt 2025-06-06T08:47:38.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copying backup data 2025-06-06T08:47:38.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] download max buf 1073741824 (arena 268435456, span 33554432, concurrency 4) 2025-06-06T08:47:38.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs1/key.db/WiredTigerLog.0000000001.gz> to 2025-06-06T08:47:38.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs1/key.db/WiredTigerHS.wt.gz> to 2025-06-06T08:47:38.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs1/key.db/key.wt.gz> to 2025-06-06T08:47:38.000+0000 I 
[restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs1/key.db/parameters.wt.gz> to 2025-06-06T08:47:38.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs1/key.db/WiredTiger.backup.gz> to 2025-06-06T08:47:38.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs1/key.db/WiredTiger.gz> to 2025-06-06T08:47:38.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs1/journal/WiredTigerLog.0000000001.gz> to 2025-06-06T08:47:38.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs1/WiredTigerHS.wt.gz> to 2025-06-06T08:47:38.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs1/_mdb_catalog.wt.gz> to 2025-06-06T08:47:38.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs1/collection-0-342674069238100246.wt.gz> to 2025-06-06T08:47:38.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs1/collection-10-342674069238100246.wt.gz> to 2025-06-06T08:47:38.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs1/collection-12-342674069238100246.wt.gz> to 2025-06-06T08:47:38.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs1/collection-14-342674069238100246.wt.gz> to 2025-06-06T08:47:38.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs1/collection-18-342674069238100246.wt.gz> to 2025-06-06T08:47:38.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs1/collection-19-342674069238100246.wt.gz> to 2025-06-06T08:47:38.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs1/collection-2-342674069238100246.wt.gz> to 2025-06-06T08:47:38.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs1/collection-22-342674069238100246.wt.gz> to 2025-06-06T08:47:38.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs1/collection-24-342674069238100246.wt.gz> to 2025-06-06T08:47:38.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs1/collection-27-342674069238100246.wt.gz> to 2025-06-06T08:47:38.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs1/collection-28-342674069238100246.wt.gz> to 2025-06-06T08:47:38.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs1/collection-31-342674069238100246.wt.gz> to 2025-06-06T08:47:38.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs1/collection-34-342674069238100246.wt.gz> to 2025-06-06T08:47:38.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs1/collection-36-342674069238100246.wt.gz> to 2025-06-06T08:47:38.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs1/collection-39-342674069238100246.wt.gz> to 2025-06-06T08:47:38.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs1/collection-4-342674069238100246.wt.gz> to 2025-06-06T08:47:38.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs1/collection-42-342674069238100246.wt.gz> to 2025-06-06T08:47:38.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs1/collection-45-342674069238100246.wt.gz> to 2025-06-06T08:47:38.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs1/collection-48-342674069238100246.wt.gz> to 
2025-06-06T08:47:38.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs1/collection-51-342674069238100246.wt.gz> to 2025-06-06T08:47:38.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs1/collection-54-342674069238100246.wt.gz> to 2025-06-06T08:47:38.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs1/collection-56-342674069238100246.wt.gz> to 2025-06-06T08:47:38.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs1/collection-58-342674069238100246.wt.gz> to 2025-06-06T08:47:38.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs1/collection-6-342674069238100246.wt.gz> to 2025-06-06T08:47:38.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs1/collection-60-342674069238100246.wt.gz> to 2025-06-06T08:47:38.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs1/collection-62-342674069238100246.wt.gz> to 2025-06-06T08:47:38.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs1/collection-64-342674069238100246.wt.gz> to 2025-06-06T08:47:38.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs1/collection-66-342674069238100246.wt.gz> to 2025-06-06T08:47:38.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs1/collection-8-342674069238100246.wt.gz> to 2025-06-06T08:47:38.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs1/index-1-342674069238100246.wt.gz> to 2025-06-06T08:47:38.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs1/index-11-342674069238100246.wt.gz> to 2025-06-06T08:47:38.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs1/index-13-342674069238100246.wt.gz> to 2025-06-06T08:47:38.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs1/index-15-342674069238100246.wt.gz> to 2025-06-06T08:47:38.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs1/index-20-342674069238100246.wt.gz> to 2025-06-06T08:47:38.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs1/index-21-342674069238100246.wt.gz> to 2025-06-06T08:47:38.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs1/index-23-342674069238100246.wt.gz> to 2025-06-06T08:47:38.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs1/index-25-342674069238100246.wt.gz> to 2025-06-06T08:47:38.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs1/index-26-342674069238100246.wt.gz> to 2025-06-06T08:47:38.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs1/index-29-342674069238100246.wt.gz> to 2025-06-06T08:47:38.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs1/index-3-342674069238100246.wt.gz> to 2025-06-06T08:47:38.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs1/index-30-342674069238100246.wt.gz> to 2025-06-06T08:47:38.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs1/index-32-342674069238100246.wt.gz> to 2025-06-06T08:47:38.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs1/index-33-342674069238100246.wt.gz> to 2025-06-06T08:47:38.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy 
<2025-06-06T08:29:58Z/rs1/index-35-342674069238100246.wt.gz> to 2025-06-06T08:47:38.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs1/index-37-342674069238100246.wt.gz> to 2025-06-06T08:47:38.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs1/index-38-342674069238100246.wt.gz> to 2025-06-06T08:47:38.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs1/index-40-342674069238100246.wt.gz> to 2025-06-06T08:47:38.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs1/index-41-342674069238100246.wt.gz> to 2025-06-06T08:47:38.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs1/index-43-342674069238100246.wt.gz> to 2025-06-06T08:47:38.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs1/index-44-342674069238100246.wt.gz> to 2025-06-06T08:47:38.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs1/index-46-342674069238100246.wt.gz> to 2025-06-06T08:47:38.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs1/index-47-342674069238100246.wt.gz> to 2025-06-06T08:47:38.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs1/index-49-342674069238100246.wt.gz> to 2025-06-06T08:47:38.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs1/index-5-342674069238100246.wt.gz> to 2025-06-06T08:47:38.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs1/index-50-342674069238100246.wt.gz> to 2025-06-06T08:47:38.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs1/index-52-342674069238100246.wt.gz> to 2025-06-06T08:47:38.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs1/index-53-342674069238100246.wt.gz> to 2025-06-06T08:47:38.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs1/index-55-342674069238100246.wt.gz> to 2025-06-06T08:47:38.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs1/index-57-342674069238100246.wt.gz> to 2025-06-06T08:47:38.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs1/index-59-342674069238100246.wt.gz> to 2025-06-06T08:47:38.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs1/index-61-342674069238100246.wt.gz> to 2025-06-06T08:47:38.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs1/index-63-342674069238100246.wt.gz> to 2025-06-06T08:47:38.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs1/index-65-342674069238100246.wt.gz> to 2025-06-06T08:47:38.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs1/index-67-342674069238100246.wt.gz> to 2025-06-06T08:47:38.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs1/index-68-342674069238100246.wt.gz> to 2025-06-06T08:47:38.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs1/index-7-342674069238100246.wt.gz> to 2025-06-06T08:47:38.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs1/index-9-342674069238100246.wt.gz> to 2025-06-06T08:47:38.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs1/sizeStorer.wt.gz> to 2025-06-06T08:47:38.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs1/WiredTiger.backup.gz> 
to 2025-06-06T08:47:38.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs1/WiredTiger.gz> to 2025-06-06T08:47:38.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs1/storage.bson.gz.0-114> to 2025-06-06T08:47:38.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs1/journal/WiredTigerLog.0000000002.gz> to 2025-06-06T08:47:39.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs1/key.db/WiredTigerLog.0000000002.gz> to 2025-06-06T08:47:39.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs1/key.db/WiredTigerLog.0000000001.gz> to 2025-06-06T08:47:39.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] download stat: buf 1073741824, arena 268435456, span 33554432, spanNum 8, cc 4, [{1 0} {1 0} {1 0} {1 0}] 2025-06-06T08:47:39.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] preparing data 2025-06-06T08:47:41.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] oplogTruncateAfterPoint: {1749198601 5} 2025-06-06T08:47:43.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] recovering oplog as standalone 2025-06-06T08:47:46.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] clean-up and reset replicaset config 2025-06-06T08:47:50.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] restore on node succeed 2025-06-06T08:47:50.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] moving to state done 2025-06-06T08:47:50.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] uploading ".pbm.restore/2025-06-06T08:46:49.801096496Z/rs.rs1/node.some-name-rs1-1.some-name-rs1.pitr-physical-5149.svc.cluster.local:27017.done" [size hint: 10 (10.00B); part size: 10485760 (10.00MB)] 2025-06-06T08:47:50.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] waiting for `done` status in rs map[.pbm.restore/2025-06-06T08:46:49.801096496Z/rs.rs1/node.some-name-rs1-0.some-name-rs1.pitr-physical-5149.svc.cluster.local:27017:{} .pbm.restore/2025-06-06T08:46:49.801096496Z/rs.rs1/node.some-name-rs1-1.some-name-rs1.pitr-physical-5149.svc.cluster.local:27017:{} .pbm.restore/2025-06-06T08:46:49.801096496Z/rs.rs1/node.some-name-rs1-2.some-name-rs1.pitr-physical-5149.svc.cluster.local:27017:{}] 2025-06-06T08:48:25.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] uploading ".pbm.restore/2025-06-06T08:46:49.801096496Z/rs.rs1/rs.done" [size hint: 10 (10.00B); part size: 10485760 (10.00MB)] 2025-06-06T08:48:25.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] waiting for shards map[.pbm.restore/2025-06-06T08:46:49.801096496Z/rs.cfg/rs:{} .pbm.restore/2025-06-06T08:46:49.801096496Z/rs.rs0/rs:{} .pbm.restore/2025-06-06T08:46:49.801096496Z/rs.rs1/rs:{} .pbm.restore/2025-06-06T08:46:49.801096496Z/rs.rs2/rs:{}] 2025-06-06T08:48:50.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] uploading ".pbm.restore/2025-06-06T08:46:49.801096496Z/rs.rs1/node.some-name-rs1-1.some-name-rs1.pitr-physical-5149.svc.cluster.local:27017.hb" [size hint: 10 (10.00B); part size: 10485760 (10.00MB)] 2025-06-06T08:48:50.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] uploading ".pbm.restore/2025-06-06T08:46:49.801096496Z/rs.rs1/rs.hb" [size hint: 10 (10.00B); part size: 10485760 (10.00MB)] 2025-06-06T08:48:50.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] uploading ".pbm.restore/2025-06-06T08:46:49.801096496Z/cluster.hb" [size hint: 10 (10.00B); part size: 10485760 (10.00MB)] 2025-06-06T08:49:10.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] uploading 
".pbm.restore/2025-06-06T08:46:49.801096496Z/cluster.done" [size hint: 10 (10.00B); part size: 10485760 (10.00MB)] 2025-06-06T08:49:10.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] waiting for cluster 2025-06-06T08:49:15.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] converged to state done 2025-06-06T08:49:15.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] uploading ".pbm.restore/2025-06-06T08:46:49.801096496Z/rs.rs1/stat.some-name-rs1-1.some-name-rs1.pitr-physical-5149.svc.cluster.local:27017" [size hint: 73 (73.00B); part size: 10485760 (10.00MB)] 2025-06-06T08:49:15.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] writing restore meta 2025-06-06T08:49:15.000+0000 W [restore/2025-06-06T08:46:49.801096496Z] meta `.pbm.restore/2025-06-06T08:46:49.801096496Z.json` already exists, trying write done status with '' 2025-06-06T08:49:15.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] rm tmp conf 2025-06-06T08:49:15.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] rm tmp logs 2025-06-06T08:49:15.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] recovery successfully finished 2025-06-06T08:49:15.000+0000 I change stream was closed 2025-06-06T08:49:15.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] hearbeats stopped 2025-06-06T08:49:15.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] uploading ".pbm.restore/2025-06-06T08:46:49.801096496Z/rs.rs1/log/some-name-rs1-1.some-name-rs1.pitr-physical-5149.svc.cluster.local:27017.0.log" [size hint: -1 (unknown); part size: 10485760 (10.00MB)] 2025-06-06T08:49:15.000+0000 D [agentCheckup] deleting agent status 2025-06-06T08:49:15.000+0000 I Exit: 2025/06/06 08:49:15 [entrypoint] `pbm-agent` exited with code 1 2025/06/06 08:49:15 [entrypoint] restart in 5 sec 2025/06/06 08:49:20 [entrypoint] starting `pbm-agent` 2025-06-06T08:49:50.000+0000 E Exit: connect to PBM: create mongo connection: ping: server selection error: server selection timeout, current topology: { Type: Unknown, Servers: [{ Addr: localhost:27017, Type: Unknown, Last error: dial tcp 127.0.0.1:27017: connect: connection refused }, ] } 2025/06/06 08:49:50 [entrypoint] `pbm-agent` exited with code 1 2025/06/06 08:49:50 [entrypoint] restart in 5 sec 2025/06/06 08:49:55 [entrypoint] starting `pbm-agent` 2025-06-06T08:50:25.000+0000 E Exit: connect to PBM: create mongo connection: ping: server selection error: server selection timeout, current topology: { Type: Unknown, Servers: [{ Addr: localhost:27017, Type: Unknown, Last error: dial tcp 127.0.0.1:27017: connect: connection refused }, ] } 2025/06/06 08:50:25 [entrypoint] `pbm-agent` exited with code 1 2025/06/06 08:50:25 [entrypoint] restart in 5 sec 2025/06/06 08:50:30 [entrypoint] starting `pbm-agent` + cat /tmp/tmp.MzhGIFRPS8 Defaulted container "mongod" out of: mongod, mongo-init (init), pbm-init (init) + rm /tmp/tmp.PHm7PPll6F /tmp/tmp.MzhGIFRPS8 + return 0 + for pod in '$(get_mongod_pods ${cluster})' + desc 'pbm-agent logs from some-name-rs1-2' + set +o xtrace ----------------------------------------------------------------------------------- pbm-agent logs from some-name-rs1-2 ----------------------------------------------------------------------------------- + kubectl_bin exec some-name-rs1-2 -- cat /tmp/pbm-agent.log ++ mktemp + local LAST_OUT=/tmp/tmp.4CF7DKRLub ++ mktemp + local LAST_ERR=/tmp/tmp.QdiPQ0PpwV + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec some-name-rs1-2 -- cat /tmp/pbm-agent.log + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break 
+ cat /tmp/tmp.4CF7DKRLub 2025/06/06 08:42:06 [entrypoint] starting `pbm-agent` 2025-06-06T08:42:10.000+0000 I [pbm-agent ASCII-art startup banner and "Join Percona Squad" promo, garbled by line-flattening, omitted] 2025-06-06T08:42:10.000+0000 I log options: log-path=/dev/stderr, log-level:D, log-json:false 2025-06-06T08:42:10.000+0000 I pbm-agent: Version: 2.9.1 Platform: linux/amd64 GitCommit: 1f8d60145ed5e7b834c6d7c5318446071bc04ecb GitBranch: release-2.9.1 BuildTime: 2025-03-25_11:35_UTC GoVersion: go1.22.8 2025-06-06T08:42:10.000+0000 I starting PITR routine 2025-06-06T08:42:10.000+0000 I node: rs1/some-name-rs1-2.some-name-rs1.pitr-physical-5149.svc.cluster.local:27017 2025-06-06T08:42:10.000+0000 I conn level ReadConcern: majority; WriteConcern: majority 2025-06-06T08:42:10.000+0000 I listening for the commands 2025-06-06T08:46:49.000+0000 I got command restore [name: 2025-06-06T08:46:49.801096496Z, snapshot: 2025-06-06T08:29:58Z point-in-time: <1749198759,0>] , opid: 6842aaf9fb2c99b9a202e303 2025-06-06T08:46:49.000+0000 I got epoch {1749198979 5} 2025-06-06T08:46:49.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] to time: 2025-06-06T08:32:39Z 2025-06-06T08:46:49.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] backup: 2025-06-06T08:29:58Z 2025-06-06T08:46:49.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] recovery started 2025-06-06T08:46:49.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] port: 28065 2025-06-06T08:46:49.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] uploading ".pbm.restore/2025-06-06T08:46:49.801096496Z/rs.rs1/node.some-name-rs1-2.some-name-rs1.pitr-physical-5149.svc.cluster.local:27017.hb" [size hint: 10 (10.00B); part size: 10485760 (10.00MB)] 2025-06-06T08:46:49.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] uploading ".pbm.restore/2025-06-06T08:46:49.801096496Z/rs.rs1/rs.hb" [size hint: 10 (10.00B); part size: 10485760 (10.00MB)] 2025-06-06T08:46:49.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] uploading ".pbm.restore/2025-06-06T08:46:49.801096496Z/cluster.hb" [size hint: 10 (10.00B); part size: 10485760 (10.00MB)] 2025-06-06T08:46:50.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] mongod binary: mongod, version: v7.0.18-11 2025-06-06T08:46:50.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] moving to state starting 2025-06-06T08:46:50.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] uploading ".pbm.restore/2025-06-06T08:46:49.801096496Z/rs.rs1/node.some-name-rs1-2.some-name-rs1.pitr-physical-5149.svc.cluster.local:27017.starting" [size hint: 10 (10.00B); part size: 10485760 (10.00MB)] 2025-06-06T08:46:50.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] waiting for cluster 2025-06-06T08:47:05.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] converged to state starting 2025-06-06T08:47:05.000+0000 D [restore/2025-06-06T08:46:49.801096496Z]
starting 2025-06-06T08:47:05.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] moving to state running 2025-06-06T08:47:05.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] uploading ".pbm.restore/2025-06-06T08:46:49.801096496Z/rs.rs1/node.some-name-rs1-2.some-name-rs1.pitr-physical-5149.svc.cluster.local:27017.running" [size hint: 10 (10.00B); part size: 10485760 (10.00MB)] 2025-06-06T08:47:05.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] waiting for cluster 2025-06-06T08:47:20.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] converged to state running 2025-06-06T08:47:20.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] send to stopAgent chan 2025-06-06T08:47:20.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] stop agents heartbeats 2025-06-06T08:47:20.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] stopping mongod and flushing old data 2025-06-06T08:47:20.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] shutdown server 2025-06-06T08:47:36.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] waiting for the node to shutdown 2025-06-06T08:47:37.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove old data 2025-06-06T08:47:37.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/collection-54-342674069238100246.wt 2025-06-06T08:47:37.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/collection-2-342674069238100246.wt 2025-06-06T08:47:37.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/index-63-342674069238100246.wt 2025-06-06T08:47:37.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/collection-48-342674069238100246.wt 2025-06-06T08:47:37.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/index-61-342674069238100246.wt 2025-06-06T08:47:37.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/collection-19-342674069238100246.wt 2025-06-06T08:47:37.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/collection-8-342674069238100246.wt 2025-06-06T08:47:37.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/index-59-342674069238100246.wt 2025-06-06T08:47:37.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/collection-6-342674069238100246.wt 2025-06-06T08:47:37.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/index-9-342674069238100246.wt 2025-06-06T08:47:37.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/collection-28-342674069238100246.wt 2025-06-06T08:47:37.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/index-20-342674069238100246.wt 2025-06-06T08:47:37.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/mongod.lock 2025-06-06T08:47:37.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/collection-12-342674069238100246.wt 2025-06-06T08:47:37.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/lost+found 2025-06-06T08:47:37.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/collection-42-342674069238100246.wt 2025-06-06T08:47:37.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/index-11-342674069238100246.wt 2025-06-06T08:47:37.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/index-38-342674069238100246.wt 2025-06-06T08:47:37.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/index-1-342674069238100246.wt 2025-06-06T08:47:37.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/index-65-342674069238100246.wt 
2025-06-06T08:47:37.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/storage.bson 2025-06-06T08:47:37.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/index-13-342674069238100246.wt 2025-06-06T08:47:37.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/index-57-342674069238100246.wt 2025-06-06T08:47:37.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/WiredTigerHS.wt 2025-06-06T08:47:37.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/collection-18-342674069238100246.wt 2025-06-06T08:47:37.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/index-50-342674069238100246.wt 2025-06-06T08:47:37.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/index-55-342674069238100246.wt 2025-06-06T08:47:37.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/index-30-342674069238100246.wt 2025-06-06T08:47:37.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/index-67-342674069238100246.wt 2025-06-06T08:47:37.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/index-7-342674069238100246.wt 2025-06-06T08:47:37.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/collection-39-342674069238100246.wt 2025-06-06T08:47:37.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/index-53-342674069238100246.wt 2025-06-06T08:47:37.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/collection-31-342674069238100246.wt 2025-06-06T08:47:37.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/index-26-342674069238100246.wt 2025-06-06T08:47:37.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/_mdb_catalog.wt 2025-06-06T08:47:37.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/collection-4-342674069238100246.wt 2025-06-06T08:47:37.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/index-25-342674069238100246.wt 2025-06-06T08:47:37.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/collection-51-342674069238100246.wt 2025-06-06T08:47:37.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/collection-36-342674069238100246.wt 2025-06-06T08:47:37.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/collection-0-342674069238100246.wt 2025-06-06T08:47:37.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/collection-62-342674069238100246.wt 2025-06-06T08:47:37.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/index-40-342674069238100246.wt 2025-06-06T08:47:37.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/index-35-342674069238100246.wt 2025-06-06T08:47:37.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/index-44-342674069238100246.wt 2025-06-06T08:47:37.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/index-33-342674069238100246.wt 2025-06-06T08:47:37.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/index-49-342674069238100246.wt 2025-06-06T08:47:37.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/index-5-342674069238100246.wt 2025-06-06T08:47:37.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/WiredTiger.lock 2025-06-06T08:47:37.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/index-21-342674069238100246.wt 2025-06-06T08:47:37.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/index-15-342674069238100246.wt 
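(Editor's note) The `remove /data/db/...` run continuing above and below is the flush step of a physical restore: with mongod shut down, the agent deletes everything under the dbpath (collection and index .wt files, WiredTiger metadata, journal/, key.db/, diagnostic.data/, lock files) so the backup can be copied into a clean directory. An illustrative shell equivalent (the agent does this in Go; this is not its actual code):

    # Empty the dbpath without removing the mount point itself.
    for entry in /data/db/* /data/db/.[!.]*; do
        [ -e "$entry" ] || continue   # skip literal globs when nothing matches
        echo "remove $entry"
        rm -rf "$entry"
    done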
2025-06-06T08:47:37.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/index-52-342674069238100246.wt 2025-06-06T08:47:37.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/sizeStorer.wt 2025-06-06T08:47:37.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/collection-58-342674069238100246.wt 2025-06-06T08:47:37.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/collection-45-342674069238100246.wt 2025-06-06T08:47:37.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/index-68-342674069238100246.wt 2025-06-06T08:47:37.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/key.db 2025-06-06T08:47:37.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/collection-22-342674069238100246.wt 2025-06-06T08:47:37.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/collection-14-342674069238100246.wt 2025-06-06T08:47:37.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/WiredTiger.turtle 2025-06-06T08:47:37.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/mongod-data 2025-06-06T08:47:37.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/collection-64-342674069238100246.wt 2025-06-06T08:47:37.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/collection-24-342674069238100246.wt 2025-06-06T08:47:37.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/diagnostic.data 2025-06-06T08:47:37.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/index-23-342674069238100246.wt 2025-06-06T08:47:37.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/collection-34-342674069238100246.wt 2025-06-06T08:47:37.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/collection-56-342674069238100246.wt 2025-06-06T08:47:37.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/index-46-342674069238100246.wt 2025-06-06T08:47:37.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/WiredTiger.wt 2025-06-06T08:47:37.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/index-41-342674069238100246.wt 2025-06-06T08:47:37.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/journal 2025-06-06T08:47:37.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/collection-60-342674069238100246.wt 2025-06-06T08:47:37.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/index-3-342674069238100246.wt 2025-06-06T08:47:37.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/index-37-342674069238100246.wt 2025-06-06T08:47:37.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/index-43-342674069238100246.wt 2025-06-06T08:47:37.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/psmdb_telemetry.data 2025-06-06T08:47:37.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/collection-10-342674069238100246.wt 2025-06-06T08:47:37.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/index-32-342674069238100246.wt 2025-06-06T08:47:37.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/WiredTiger 2025-06-06T08:47:37.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/collection-66-342674069238100246.wt 2025-06-06T08:47:37.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/index-29-342674069238100246.wt 2025-06-06T08:47:37.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove 
/data/db/collection-27-342674069238100246.wt 2025-06-06T08:47:37.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/index-47-342674069238100246.wt 2025-06-06T08:47:37.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copying backup data 2025-06-06T08:47:37.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] download max buf 1073741824 (arena 268435456, span 33554432, concurrency 4) 2025-06-06T08:47:37.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs1/key.db/WiredTigerLog.0000000001.gz> to 2025-06-06T08:47:37.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs1/key.db/WiredTigerHS.wt.gz> to 2025-06-06T08:47:37.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs1/key.db/key.wt.gz> to 2025-06-06T08:47:37.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs1/key.db/parameters.wt.gz> to 2025-06-06T08:47:37.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs1/key.db/WiredTiger.backup.gz> to 2025-06-06T08:47:37.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs1/key.db/WiredTiger.gz> to 2025-06-06T08:47:37.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs1/journal/WiredTigerLog.0000000001.gz> to 2025-06-06T08:47:37.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs1/WiredTigerHS.wt.gz> to 2025-06-06T08:47:37.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs1/_mdb_catalog.wt.gz> to 2025-06-06T08:47:37.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs1/collection-0-342674069238100246.wt.gz> to 2025-06-06T08:47:37.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs1/collection-10-342674069238100246.wt.gz> to 2025-06-06T08:47:37.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs1/collection-12-342674069238100246.wt.gz> to 2025-06-06T08:47:37.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs1/collection-14-342674069238100246.wt.gz> to 2025-06-06T08:47:37.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs1/collection-18-342674069238100246.wt.gz> to 2025-06-06T08:47:37.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs1/collection-19-342674069238100246.wt.gz> to 2025-06-06T08:47:37.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs1/collection-2-342674069238100246.wt.gz> to 2025-06-06T08:47:37.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs1/collection-22-342674069238100246.wt.gz> to 2025-06-06T08:47:37.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs1/collection-24-342674069238100246.wt.gz> to 2025-06-06T08:47:37.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs1/collection-27-342674069238100246.wt.gz> to 2025-06-06T08:47:37.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs1/collection-28-342674069238100246.wt.gz> to 2025-06-06T08:47:37.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs1/collection-31-342674069238100246.wt.gz> to 2025-06-06T08:47:37.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs1/collection-34-342674069238100246.wt.gz> to 2025-06-06T08:47:37.000+0000 I 
[restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs1/collection-36-342674069238100246.wt.gz> to 2025-06-06T08:47:37.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs1/collection-39-342674069238100246.wt.gz> to 2025-06-06T08:47:37.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs1/collection-4-342674069238100246.wt.gz> to 2025-06-06T08:47:37.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs1/collection-42-342674069238100246.wt.gz> to 2025-06-06T08:47:37.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs1/collection-45-342674069238100246.wt.gz> to 2025-06-06T08:47:37.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs1/collection-48-342674069238100246.wt.gz> to 2025-06-06T08:47:37.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs1/collection-51-342674069238100246.wt.gz> to 2025-06-06T08:47:37.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs1/collection-54-342674069238100246.wt.gz> to 2025-06-06T08:47:37.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs1/collection-56-342674069238100246.wt.gz> to 2025-06-06T08:47:37.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs1/collection-58-342674069238100246.wt.gz> to 2025-06-06T08:47:37.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs1/collection-6-342674069238100246.wt.gz> to 2025-06-06T08:47:37.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs1/collection-60-342674069238100246.wt.gz> to 2025-06-06T08:47:37.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs1/collection-62-342674069238100246.wt.gz> to 2025-06-06T08:47:37.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs1/collection-64-342674069238100246.wt.gz> to 2025-06-06T08:47:37.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs1/collection-66-342674069238100246.wt.gz> to 2025-06-06T08:47:37.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs1/collection-8-342674069238100246.wt.gz> to 2025-06-06T08:47:37.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs1/index-1-342674069238100246.wt.gz> to 2025-06-06T08:47:37.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs1/index-11-342674069238100246.wt.gz> to 2025-06-06T08:47:37.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs1/index-13-342674069238100246.wt.gz> to 2025-06-06T08:47:37.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs1/index-15-342674069238100246.wt.gz> to 2025-06-06T08:47:37.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs1/index-20-342674069238100246.wt.gz> to 2025-06-06T08:47:37.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs1/index-21-342674069238100246.wt.gz> to 2025-06-06T08:47:37.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs1/index-23-342674069238100246.wt.gz> to 2025-06-06T08:47:37.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs1/index-25-342674069238100246.wt.gz> to 2025-06-06T08:47:37.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy 
<2025-06-06T08:29:58Z/rs1/index-26-342674069238100246.wt.gz> to 2025-06-06T08:47:37.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs1/index-29-342674069238100246.wt.gz> to 2025-06-06T08:47:37.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs1/index-3-342674069238100246.wt.gz> to 2025-06-06T08:47:37.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs1/index-30-342674069238100246.wt.gz> to 2025-06-06T08:47:37.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs1/index-32-342674069238100246.wt.gz> to 2025-06-06T08:47:37.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs1/index-33-342674069238100246.wt.gz> to 2025-06-06T08:47:37.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs1/index-35-342674069238100246.wt.gz> to 2025-06-06T08:47:37.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs1/index-37-342674069238100246.wt.gz> to 2025-06-06T08:47:37.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs1/index-38-342674069238100246.wt.gz> to 2025-06-06T08:47:37.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs1/index-40-342674069238100246.wt.gz> to 2025-06-06T08:47:37.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs1/index-41-342674069238100246.wt.gz> to 2025-06-06T08:47:37.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs1/index-43-342674069238100246.wt.gz> to 2025-06-06T08:47:37.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs1/index-44-342674069238100246.wt.gz> to 2025-06-06T08:47:37.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs1/index-46-342674069238100246.wt.gz> to 2025-06-06T08:47:37.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs1/index-47-342674069238100246.wt.gz> to 2025-06-06T08:47:37.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs1/index-49-342674069238100246.wt.gz> to 2025-06-06T08:47:37.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs1/index-5-342674069238100246.wt.gz> to 2025-06-06T08:47:37.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs1/index-50-342674069238100246.wt.gz> to 2025-06-06T08:47:37.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs1/index-52-342674069238100246.wt.gz> to 2025-06-06T08:47:37.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs1/index-53-342674069238100246.wt.gz> to 2025-06-06T08:47:37.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs1/index-55-342674069238100246.wt.gz> to 2025-06-06T08:47:37.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs1/index-57-342674069238100246.wt.gz> to 2025-06-06T08:47:37.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs1/index-59-342674069238100246.wt.gz> to 2025-06-06T08:47:37.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs1/index-61-342674069238100246.wt.gz> to 2025-06-06T08:47:37.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs1/index-63-342674069238100246.wt.gz> to 2025-06-06T08:47:37.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy 
<2025-06-06T08:29:58Z/rs1/index-65-342674069238100246.wt.gz> to 2025-06-06T08:47:37.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs1/index-67-342674069238100246.wt.gz> to 2025-06-06T08:47:37.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs1/index-68-342674069238100246.wt.gz> to 2025-06-06T08:47:37.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs1/index-7-342674069238100246.wt.gz> to 2025-06-06T08:47:38.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs1/index-9-342674069238100246.wt.gz> to 2025-06-06T08:47:38.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs1/sizeStorer.wt.gz> to 2025-06-06T08:47:38.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs1/WiredTiger.backup.gz> to 2025-06-06T08:47:38.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs1/WiredTiger.gz> to 2025-06-06T08:47:38.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs1/storage.bson.gz.0-114> to 2025-06-06T08:47:38.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs1/journal/WiredTigerLog.0000000002.gz> to 2025-06-06T08:47:38.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs1/key.db/WiredTigerLog.0000000002.gz> to 2025-06-06T08:47:38.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs1/key.db/WiredTigerLog.0000000001.gz> to 2025-06-06T08:47:38.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] download stat: buf 1073741824, arena 268435456, span 33554432, spanNum 8, cc 4, [{1 0} {1 0} {0 0} {1 0}] 2025-06-06T08:47:38.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] preparing data 2025-06-06T08:47:42.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] oplogTruncateAfterPoint: {1749198601 5} 2025-06-06T08:47:43.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] recovering oplog as standalone 2025-06-06T08:47:47.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] clean-up and reset replicaset config 2025-06-06T08:47:51.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] restore on node succeed 2025-06-06T08:47:51.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] moving to state done 2025-06-06T08:47:51.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] uploading ".pbm.restore/2025-06-06T08:46:49.801096496Z/rs.rs1/node.some-name-rs1-2.some-name-rs1.pitr-physical-5149.svc.cluster.local:27017.done" [size hint: 10 (10.00B); part size: 10485760 (10.00MB)] 2025-06-06T08:47:51.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] waiting for `done` status in rs map[.pbm.restore/2025-06-06T08:46:49.801096496Z/rs.rs1/node.some-name-rs1-0.some-name-rs1.pitr-physical-5149.svc.cluster.local:27017:{} .pbm.restore/2025-06-06T08:46:49.801096496Z/rs.rs1/node.some-name-rs1-1.some-name-rs1.pitr-physical-5149.svc.cluster.local:27017:{} .pbm.restore/2025-06-06T08:46:49.801096496Z/rs.rs1/node.some-name-rs1-2.some-name-rs1.pitr-physical-5149.svc.cluster.local:27017:{}] 2025-06-06T08:48:26.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] uploading ".pbm.restore/2025-06-06T08:46:49.801096496Z/rs.rs1/rs.done" [size hint: 10 (10.00B); part size: 10485760 (10.00MB)] 2025-06-06T08:48:26.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] waiting for shards map[.pbm.restore/2025-06-06T08:46:49.801096496Z/rs.cfg/rs:{} .pbm.restore/2025-06-06T08:46:49.801096496Z/rs.rs0/rs:{} 
.pbm.restore/2025-06-06T08:46:49.801096496Z/rs.rs1/rs:{} .pbm.restore/2025-06-06T08:46:49.801096496Z/rs.rs2/rs:{}] 2025-06-06T08:48:49.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] uploading ".pbm.restore/2025-06-06T08:46:49.801096496Z/rs.rs1/node.some-name-rs1-2.some-name-rs1.pitr-physical-5149.svc.cluster.local:27017.hb" [size hint: 10 (10.00B); part size: 10485760 (10.00MB)] 2025-06-06T08:48:49.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] uploading ".pbm.restore/2025-06-06T08:46:49.801096496Z/rs.rs1/rs.hb" [size hint: 10 (10.00B); part size: 10485760 (10.00MB)] 2025-06-06T08:48:49.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] uploading ".pbm.restore/2025-06-06T08:46:49.801096496Z/cluster.hb" [size hint: 10 (10.00B); part size: 10485760 (10.00MB)] 2025-06-06T08:49:11.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] uploading ".pbm.restore/2025-06-06T08:46:49.801096496Z/cluster.done" [size hint: 10 (10.00B); part size: 10485760 (10.00MB)] 2025-06-06T08:49:11.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] waiting for cluster 2025-06-06T08:49:16.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] converged to state done 2025-06-06T08:49:16.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] uploading ".pbm.restore/2025-06-06T08:46:49.801096496Z/rs.rs1/stat.some-name-rs1-2.some-name-rs1.pitr-physical-5149.svc.cluster.local:27017" [size hint: 73 (73.00B); part size: 10485760 (10.00MB)] 2025-06-06T08:49:16.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] writing restore meta 2025-06-06T08:49:16.000+0000 W [restore/2025-06-06T08:46:49.801096496Z] meta `.pbm.restore/2025-06-06T08:46:49.801096496Z.json` already exists, trying write done status with '' 2025-06-06T08:49:16.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] rm tmp conf 2025-06-06T08:49:16.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] rm tmp logs 2025-06-06T08:49:16.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] recovery successfully finished 2025-06-06T08:49:16.000+0000 I change stream was closed 2025-06-06T08:49:16.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] hearbeats stopped 2025-06-06T08:49:16.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] uploading ".pbm.restore/2025-06-06T08:46:49.801096496Z/rs.rs1/log/some-name-rs1-2.some-name-rs1.pitr-physical-5149.svc.cluster.local:27017.0.log" [size hint: -1 (unknown); part size: 10485760 (10.00MB)] 2025-06-06T08:49:16.000+0000 I Exit: 2025-06-06T08:49:16.000+0000 E [pitr] init: get conf: get: server selection error: server selection timeout, current topology: { Type: ReplicaSetNoPrimary, Servers: [{ Addr: some-name-cfg-0.some-name-cfg.pitr-physical-5149.svc.cluster.local:27017, Type: Unknown, Last error: dial tcp 10.141.90.80:27017: connect: connection refused }, { Addr: some-name-cfg-1.some-name-cfg.pitr-physical-5149.svc.cluster.local:27017, Type: Unknown, Last error: dial tcp 10.141.89.3:27017: connect: connection refused }, { Addr: some-name-cfg-2.some-name-cfg.pitr-physical-5149.svc.cluster.local:27017, Type: Unknown, Last error: dial tcp 10.141.88.78:27017: connect: connection refused }, ] } 2025/06/06 08:49:16 [entrypoint] `pbm-agent` exited with code 1 2025/06/06 08:49:16 [entrypoint] restart in 5 sec 2025/06/06 08:49:21 [entrypoint] starting `pbm-agent` 2025-06-06T08:49:51.000+0000 E Exit: connect to PBM: create mongo connection: ping: server selection error: server selection timeout, current topology: { Type: Unknown, Servers: [{ Addr: localhost:27017, Type: Unknown, Last error: dial tcp 127.0.0.1:27017: connect: connection 
refused }, ] } 2025/06/06 08:49:51 [entrypoint] `pbm-agent` exited with code 1 2025/06/06 08:49:51 [entrypoint] restart in 5 sec 2025/06/06 08:49:56 [entrypoint] starting `pbm-agent` 2025-06-06T08:50:26.000+0000 E Exit: connect to PBM: create mongo connection: ping: server selection error: server selection timeout, current topology: { Type: Unknown, Servers: [{ Addr: localhost:27017, Type: Unknown, Last error: dial tcp 127.0.0.1:27017: connect: connection refused }, ] } 2025/06/06 08:50:26 [entrypoint] `pbm-agent` exited with code 1 2025/06/06 08:50:26 [entrypoint] restart in 5 sec 2025/06/06 08:50:31 [entrypoint] starting `pbm-agent` + cat /tmp/tmp.QdiPQ0PpwV Defaulted container "mongod" out of: mongod, mongo-init (init), pbm-init (init) + rm /tmp/tmp.4CF7DKRLub /tmp/tmp.QdiPQ0PpwV + return 0 + for pod in '$(get_mongod_pods ${cluster})' + desc 'pbm-agent logs from some-name-rs2-0' + set +o xtrace ----------------------------------------------------------------------------------- pbm-agent logs from some-name-rs2-0 ----------------------------------------------------------------------------------- + kubectl_bin exec some-name-rs2-0 -- cat /tmp/pbm-agent.log ++ mktemp + local LAST_OUT=/tmp/tmp.OZgync9GRF ++ mktemp + local LAST_ERR=/tmp/tmp.OOSKGW2D9E + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec some-name-rs2-0 -- cat /tmp/pbm-agent.log + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.OZgync9GRF 2025/06/06 08:45:53 [entrypoint] starting `pbm-agent` 2025-06-06T08:45:57.000+0000 I [pbm-agent ASCII-art startup banner and "Join Percona Squad" promo, garbled by line-flattening, omitted] 2025-06-06T08:45:57.000+0000 I log options: log-path=/dev/stderr, log-level:D, log-json:false 2025-06-06T08:45:57.000+0000 I pbm-agent: Version: 2.9.1 Platform: linux/amd64 GitCommit: 1f8d60145ed5e7b834c6d7c5318446071bc04ecb GitBranch: release-2.9.1 BuildTime: 2025-03-25_11:35_UTC GoVersion: go1.22.8 2025-06-06T08:45:57.000+0000 I starting PITR routine 2025-06-06T08:45:57.000+0000 I node: rs2/some-name-rs2-0.some-name-rs2.pitr-physical-5149.svc.cluster.local:27017 2025-06-06T08:45:57.000+0000 I conn level ReadConcern: majority; WriteConcern: majority 2025-06-06T08:45:57.000+0000 I listening for the commands 2025-06-06T08:46:50.000+0000 I got command restore [name: 2025-06-06T08:46:49.801096496Z, snapshot: 2025-06-06T08:29:58Z point-in-time: <1749198759,0>] , opid: 6842aaf9fb2c99b9a202e303 2025-06-06T08:46:50.000+0000 I got epoch {1749199610 54} 2025-06-06T08:46:50.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] to time: 2025-06-06T08:32:39Z 2025-06-06T08:46:50.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] oplog slicer disabled 2025-06-06T08:46:50.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] backup: 2025-06-06T08:29:58Z 2025-06-06T08:46:50.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] recovery started 2025-06-06T08:46:50.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] port: 28034 2025-06-06T08:46:50.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] uploading ".pbm.restore/2025-06-06T08:46:49.801096496Z/rs.rs2/node.some-name-rs2-0.some-name-rs2.pitr-physical-5149.svc.cluster.local:27017.hb" [size hint: 10 (10.00B); part size: 10485760 (10.00MB)] 2025-06-06T08:46:50.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] uploading ".pbm.restore/2025-06-06T08:46:49.801096496Z/rs.rs2/rs.hb" [size hint: 10 (10.00B); part size: 10485760 (10.00MB)] 2025-06-06T08:46:50.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] uploading ".pbm.restore/2025-06-06T08:46:49.801096496Z/cluster.hb" [size hint: 10 (10.00B); part size: 10485760 (10.00MB)] 2025-06-06T08:46:50.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] mongod binary: mongod, version: v7.0.18-11 2025-06-06T08:46:50.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] moving to state starting 2025-06-06T08:46:50.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] uploading ".pbm.restore/2025-06-06T08:46:49.801096496Z/rs.rs2/node.some-name-rs2-0.some-name-rs2.pitr-physical-5149.svc.cluster.local:27017.starting" [size hint: 10 (10.00B); part size: 10485760 (10.00MB)] 2025-06-06T08:46:50.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] waiting for `starting` status in rs map[.pbm.restore/2025-06-06T08:46:49.801096496Z/rs.rs2/node.some-name-rs2-0.some-name-rs2.pitr-physical-5149.svc.cluster.local:27017:{} .pbm.restore/2025-06-06T08:46:49.801096496Z/rs.rs2/node.some-name-rs2-1.some-name-rs2.pitr-physical-5149.svc.cluster.local:27017:{} .pbm.restore/2025-06-06T08:46:49.801096496Z/rs.rs2/node.some-name-rs2-2.some-name-rs2.pitr-physical-5149.svc.cluster.local:27017:{}] 2025-06-06T08:46:55.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] uploading ".pbm.restore/2025-06-06T08:46:49.801096496Z/rs.rs2/rs.starting" [size hint: 10 (10.00B); part size: 10485760 (10.00MB)] 2025-06-06T08:46:55.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] waiting for cluster 2025-06-06T08:47:00.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] converged to state starting 2025-06-06T08:47:00.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] starting 2025-06-06T08:47:00.000+0000 I
[restore/2025-06-06T08:46:49.801096496Z] moving to state running 2025-06-06T08:47:00.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] uploading ".pbm.restore/2025-06-06T08:46:49.801096496Z/rs.rs2/node.some-name-rs2-0.some-name-rs2.pitr-physical-5149.svc.cluster.local:27017.running" [size hint: 10 (10.00B); part size: 10485760 (10.00MB)] 2025-06-06T08:47:00.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] waiting for `running` status in rs map[.pbm.restore/2025-06-06T08:46:49.801096496Z/rs.rs2/node.some-name-rs2-0.some-name-rs2.pitr-physical-5149.svc.cluster.local:27017:{} .pbm.restore/2025-06-06T08:46:49.801096496Z/rs.rs2/node.some-name-rs2-1.some-name-rs2.pitr-physical-5149.svc.cluster.local:27017:{} .pbm.restore/2025-06-06T08:46:49.801096496Z/rs.rs2/node.some-name-rs2-2.some-name-rs2.pitr-physical-5149.svc.cluster.local:27017:{}] 2025-06-06T08:47:05.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] uploading ".pbm.restore/2025-06-06T08:46:49.801096496Z/rs.rs2/rs.running" [size hint: 10 (10.00B); part size: 10485760 (10.00MB)] 2025-06-06T08:47:05.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] waiting for cluster 2025-06-06T08:47:15.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] converged to state running 2025-06-06T08:47:15.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] send to stopAgent chan 2025-06-06T08:47:16.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] stop agents heartbeats 2025-06-06T08:47:16.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] stopping mongod and flushing old data 2025-06-06T08:47:16.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] shutdown server 2025-06-06T08:47:16.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] waiting to became secondary 2025-06-06T08:47:17.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] waiting to became secondary 2025-06-06T08:47:18.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] waiting to became secondary 2025-06-06T08:47:19.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] waiting to became secondary 2025-06-06T08:47:20.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] waiting to became secondary 2025-06-06T08:47:21.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] waiting to became secondary 2025-06-06T08:47:22.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] waiting to became secondary 2025-06-06T08:47:23.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] waiting to became secondary 2025-06-06T08:47:24.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] waiting to became secondary 2025-06-06T08:47:25.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] waiting to became secondary 2025-06-06T08:47:26.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] waiting to became secondary 2025-06-06T08:47:27.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] waiting to became secondary 2025-06-06T08:47:28.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] waiting to became secondary 2025-06-06T08:47:29.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] waiting to became secondary 2025-06-06T08:47:30.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] waiting to became secondary 2025-06-06T08:47:31.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] waiting to became secondary 2025-06-06T08:47:32.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] waiting to became secondary 2025-06-06T08:47:33.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] waiting to became secondary 2025-06-06T08:47:34.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] waiting to became secondary 
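(Editor's note) Unlike the rs1 pods, some-name-rs2-0 was apparently primary when the restore reached it: before shutting mongod down, the agent polls about once per second until the node has stepped down, producing the `waiting to became secondary` run that continues above and below (the wording, typo included, is verbatim pbm-agent output). A hedged sketch of such a poll; the mongosh invocation is illustrative, not the agent's actual driver call:

    # Poll the hello response until the node reports itself secondary.
    until [ "$(mongosh "mongodb://localhost:27017" --quiet --eval 'db.hello().secondary')" = "true" ]; do
        echo "waiting to became secondary"
        sleep 1
    done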
2025-06-06T08:47:35.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] waiting to became secondary 2025-06-06T08:47:36.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] waiting to became secondary 2025-06-06T08:47:37.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] waiting to became secondary 2025-06-06T08:47:38.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] waiting to became secondary 2025-06-06T08:47:39.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] waiting to became secondary 2025-06-06T08:47:40.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] waiting to became secondary 2025-06-06T08:47:41.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] waiting to became secondary 2025-06-06T08:47:42.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] waiting to became secondary 2025-06-06T08:47:43.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] waiting to became secondary 2025-06-06T08:47:44.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] waiting to became secondary 2025-06-06T08:48:01.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] waiting for the node to shutdown 2025-06-06T08:48:02.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] uploading ".pbm.restore/2025-06-06T08:46:49.801096496Z/rs.rs2/rs.down" [size hint: 10 (10.00B); part size: 10485760 (10.00MB)] 2025-06-06T08:48:02.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove old data 2025-06-06T08:48:02.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/psmdb_telemetry.data 2025-06-06T08:48:02.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/index-1--3343970089367830340.wt 2025-06-06T08:48:02.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/collection-40--3343970089367830340.wt 2025-06-06T08:48:02.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/index-31--3343970089367830340.wt 2025-06-06T08:48:02.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/collection-17--3343970089367830340.wt 2025-06-06T08:48:02.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/collection-19--3343970089367830340.wt 2025-06-06T08:48:02.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/index-61--3343970089367830340.wt 2025-06-06T08:48:02.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/diagnostic.data 2025-06-06T08:48:02.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/collection-2--3343970089367830340.wt 2025-06-06T08:48:02.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/collection-21--3343970089367830340.wt 2025-06-06T08:48:02.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/index-63--3343970089367830340.wt 2025-06-06T08:48:02.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/index-11--3343970089367830340.wt 2025-06-06T08:48:02.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/index-29--3343970089367830340.wt 2025-06-06T08:48:02.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/collection-34--3343970089367830340.wt 2025-06-06T08:48:02.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/journal 2025-06-06T08:48:02.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/WiredTiger.lock 2025-06-06T08:48:02.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/collection-12--3343970089367830340.wt 2025-06-06T08:48:02.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/index-48--3343970089367830340.wt 
2025-06-06T08:48:02.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/index-50--3343970089367830340.wt 2025-06-06T08:48:02.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/WiredTigerHS.wt 2025-06-06T08:48:02.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/WiredTiger.turtle 2025-06-06T08:48:02.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/WiredTiger.wt 2025-06-06T08:48:02.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/collection-14--3343970089367830340.wt 2025-06-06T08:48:02.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/index-35--3343970089367830340.wt 2025-06-06T08:48:02.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/index-52--3343970089367830340.wt 2025-06-06T08:48:02.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/collection-47--3343970089367830340.wt 2025-06-06T08:48:02.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/index-26--3343970089367830340.wt 2025-06-06T08:48:02.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/index-28--3343970089367830340.wt 2025-06-06T08:48:02.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/collection-36--3343970089367830340.wt 2025-06-06T08:48:02.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/collection-30--3343970089367830340.wt 2025-06-06T08:48:02.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/collection-25--3343970089367830340.wt 2025-06-06T08:48:02.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/collection-60--3343970089367830340.wt 2025-06-06T08:48:02.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/index-37--3343970089367830340.wt 2025-06-06T08:48:02.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/index-53--3343970089367830340.wt 2025-06-06T08:48:02.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/index-18--3343970089367830340.wt 2025-06-06T08:48:02.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/collection-4--3343970089367830340.wt 2025-06-06T08:48:02.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/lost+found 2025-06-06T08:48:02.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/collection-16--3343970089367830340.wt 2025-06-06T08:48:02.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/index-3--3343970089367830340.wt 2025-06-06T08:48:02.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/index-57--3343970089367830340.wt 2025-06-06T08:48:02.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/collection-8--3343970089367830340.wt 2025-06-06T08:48:02.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/index-38--3343970089367830340.wt 2025-06-06T08:48:02.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/collection-55--3343970089367830340.wt 2025-06-06T08:48:02.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/collection-62--3343970089367830340.wt 2025-06-06T08:48:02.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/mongod-data 2025-06-06T08:48:02.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/collection-10--3343970089367830340.wt 2025-06-06T08:48:02.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/index-49--3343970089367830340.wt 2025-06-06T08:48:02.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove 
/data/db/index-46--3343970089367830340.wt 2025-06-06T08:48:02.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/_mdb_catalog.wt 2025-06-06T08:48:02.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/index-5--3343970089367830340.wt 2025-06-06T08:48:02.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/index-41--3343970089367830340.wt 2025-06-06T08:48:02.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/index-39--3343970089367830340.wt 2025-06-06T08:48:02.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/index-24--3343970089367830340.wt 2025-06-06T08:48:02.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/index-13--3343970089367830340.wt 2025-06-06T08:48:02.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/storage.bson 2025-06-06T08:48:02.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/index-64--3343970089367830340.wt 2025-06-06T08:48:02.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/index-23--3343970089367830340.wt 2025-06-06T08:48:02.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/index-42--3343970089367830340.wt 2025-06-06T08:48:02.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/index-43--3343970089367830340.wt 2025-06-06T08:48:02.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/collection-54--3343970089367830340.wt 2025-06-06T08:48:02.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/index-59--3343970089367830340.wt 2025-06-06T08:48:02.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/collection-32--3343970089367830340.wt 2025-06-06T08:48:02.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/index-56--3343970089367830340.wt 2025-06-06T08:48:02.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/index-45--3343970089367830340.wt 2025-06-06T08:48:02.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/collection-22--3343970089367830340.wt 2025-06-06T08:48:02.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/key.db 2025-06-06T08:48:02.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/mongod.lock 2025-06-06T08:48:02.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/sizeStorer.wt 2025-06-06T08:48:02.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/index-9--3343970089367830340.wt 2025-06-06T08:48:02.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/collection-0--3343970089367830340.wt 2025-06-06T08:48:02.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/collection-44--3343970089367830340.wt 2025-06-06T08:48:02.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/collection-33--3343970089367830340.wt 2025-06-06T08:48:02.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/collection-27--3343970089367830340.wt 2025-06-06T08:48:02.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/WiredTiger 2025-06-06T08:48:02.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/collection-58--3343970089367830340.wt 2025-06-06T08:48:02.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/index-15--3343970089367830340.wt 2025-06-06T08:48:02.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/collection-51--3343970089367830340.wt 2025-06-06T08:48:02.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove 
/data/db/index-7--3343970089367830340.wt 2025-06-06T08:48:02.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/collection-6--3343970089367830340.wt 2025-06-06T08:48:02.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/index-20--3343970089367830340.wt 2025-06-06T08:48:02.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copying backup data 2025-06-06T08:48:02.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] download max buf 1073741824 (arena 268435456, span 33554432, concurrency 4) 2025-06-06T08:48:02.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs2/key.db/WiredTigerLog.0000000001.gz> to 2025-06-06T08:48:02.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs2/key.db/WiredTigerHS.wt.gz> to 2025-06-06T08:48:02.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs2/key.db/key.wt.gz> to 2025-06-06T08:48:02.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs2/key.db/parameters.wt.gz> to 2025-06-06T08:48:02.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs2/key.db/WiredTiger.backup.gz> to 2025-06-06T08:48:02.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs2/key.db/WiredTiger.gz> to 2025-06-06T08:48:02.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs2/journal/WiredTigerLog.0000000001.gz> to 2025-06-06T08:48:02.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs2/WiredTigerHS.wt.gz> to 2025-06-06T08:48:02.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs2/_mdb_catalog.wt.gz> to 2025-06-06T08:48:02.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs2/collection-0-7255838504681023155.wt.gz> to 2025-06-06T08:48:02.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs2/collection-10-7255838504681023155.wt.gz> to 2025-06-06T08:48:02.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs2/collection-12-7255838504681023155.wt.gz> to 2025-06-06T08:48:02.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs2/collection-14-7255838504681023155.wt.gz> to 2025-06-06T08:48:02.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs2/collection-18-7255838504681023155.wt.gz> to 2025-06-06T08:48:02.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs2/collection-19-7255838504681023155.wt.gz> to 2025-06-06T08:48:02.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs2/collection-2-7255838504681023155.wt.gz> to 2025-06-06T08:48:02.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs2/collection-22-7255838504681023155.wt.gz> to 2025-06-06T08:48:02.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs2/collection-24-7255838504681023155.wt.gz> to 2025-06-06T08:48:02.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs2/collection-27-7255838504681023155.wt.gz> to 2025-06-06T08:48:02.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs2/collection-29-7255838504681023155.wt.gz> to 2025-06-06T08:48:02.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs2/collection-31-7255838504681023155.wt.gz> to 2025-06-06T08:48:02.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy 
<2025-06-06T08:29:58Z/rs2/collection-34-7255838504681023155.wt.gz> to 2025-06-06T08:48:02.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs2/collection-37-7255838504681023155.wt.gz> to 2025-06-06T08:48:02.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs2/collection-4-7255838504681023155.wt.gz> to 2025-06-06T08:48:02.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs2/collection-40-7255838504681023155.wt.gz> to 2025-06-06T08:48:02.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs2/collection-43-7255838504681023155.wt.gz> to 2025-06-06T08:48:02.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs2/collection-46-7255838504681023155.wt.gz> to 2025-06-06T08:48:02.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs2/collection-47-7255838504681023155.wt.gz> to 2025-06-06T08:48:02.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs2/collection-50-7255838504681023155.wt.gz> to 2025-06-06T08:48:02.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs2/collection-53-7255838504681023155.wt.gz> to 2025-06-06T08:48:02.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs2/collection-56-7255838504681023155.wt.gz> to 2025-06-06T08:48:02.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs2/collection-58-7255838504681023155.wt.gz> to 2025-06-06T08:48:02.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs2/collection-6-7255838504681023155.wt.gz> to 2025-06-06T08:48:02.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs2/collection-60-7255838504681023155.wt.gz> to 2025-06-06T08:48:02.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs2/collection-62-7255838504681023155.wt.gz> to 2025-06-06T08:48:02.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs2/collection-8-7255838504681023155.wt.gz> to 2025-06-06T08:48:02.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs2/index-1-7255838504681023155.wt.gz> to 2025-06-06T08:48:02.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs2/index-11-7255838504681023155.wt.gz> to 2025-06-06T08:48:02.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs2/index-13-7255838504681023155.wt.gz> to 2025-06-06T08:48:02.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs2/index-15-7255838504681023155.wt.gz> to 2025-06-06T08:48:02.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs2/index-20-7255838504681023155.wt.gz> to 2025-06-06T08:48:02.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs2/index-21-7255838504681023155.wt.gz> to 2025-06-06T08:48:02.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs2/index-23-7255838504681023155.wt.gz> to 2025-06-06T08:48:02.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs2/index-25-7255838504681023155.wt.gz> to 2025-06-06T08:48:02.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs2/index-26-7255838504681023155.wt.gz> to 2025-06-06T08:48:02.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs2/index-28-7255838504681023155.wt.gz> to 
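[editor's note] The "download max buf 1073741824 (arena 268435456, span 33554432, concurrency 4)" entry above and the "download stat ... spanNum 8" entry further down are self-consistent: a 1 GiB buffer split over 4 concurrent downloaders gives each a 256 MiB arena, and each arena holds eight 32 MiB spans. The relationships below are inferred from the numbers in this log, not taken from PBM documentation (the copy entries continue after this note):

buf=1073741824      # 1 GiB total download buffer
concurrency=4
span=33554432       # 32 MiB per span
arena=$((buf / concurrency))   # 268435456 (256 MiB), matches the log
spanNum=$((arena / span))      # 8, matches "spanNum 8" in the download stat
echo "arena=$arena spanNum=$spanNum"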
2025-06-06T08:48:02.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs2/index-3-7255838504681023155.wt.gz> to 2025-06-06T08:48:02.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs2/index-30-7255838504681023155.wt.gz> to 2025-06-06T08:48:02.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs2/index-32-7255838504681023155.wt.gz> to 2025-06-06T08:48:02.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs2/index-33-7255838504681023155.wt.gz> to 2025-06-06T08:48:03.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs2/index-35-7255838504681023155.wt.gz> to 2025-06-06T08:48:03.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs2/index-36-7255838504681023155.wt.gz> to 2025-06-06T08:48:03.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs2/index-38-7255838504681023155.wt.gz> to 2025-06-06T08:48:03.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs2/index-39-7255838504681023155.wt.gz> to 2025-06-06T08:48:03.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs2/index-41-7255838504681023155.wt.gz> to 2025-06-06T08:48:03.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs2/index-42-7255838504681023155.wt.gz> to 2025-06-06T08:48:03.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs2/index-44-7255838504681023155.wt.gz> to 2025-06-06T08:48:03.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs2/index-45-7255838504681023155.wt.gz> to 2025-06-06T08:48:03.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs2/index-48-7255838504681023155.wt.gz> to 2025-06-06T08:48:03.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs2/index-49-7255838504681023155.wt.gz> to 2025-06-06T08:48:03.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs2/index-5-7255838504681023155.wt.gz> to 2025-06-06T08:48:03.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs2/index-51-7255838504681023155.wt.gz> to 2025-06-06T08:48:03.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs2/index-52-7255838504681023155.wt.gz> to 2025-06-06T08:48:03.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs2/index-54-7255838504681023155.wt.gz> to 2025-06-06T08:48:03.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs2/index-55-7255838504681023155.wt.gz> to 2025-06-06T08:48:03.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs2/index-57-7255838504681023155.wt.gz> to 2025-06-06T08:48:03.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs2/index-59-7255838504681023155.wt.gz> to 2025-06-06T08:48:03.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs2/index-61-7255838504681023155.wt.gz> to 2025-06-06T08:48:03.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs2/index-63-7255838504681023155.wt.gz> to 2025-06-06T08:48:03.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs2/index-64-7255838504681023155.wt.gz> to 2025-06-06T08:48:03.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs2/index-7-7255838504681023155.wt.gz> to 
2025-06-06T08:48:03.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs2/index-9-7255838504681023155.wt.gz> to 2025-06-06T08:48:03.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs2/sizeStorer.wt.gz> to 2025-06-06T08:48:03.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs2/WiredTiger.backup.gz> to 2025-06-06T08:48:03.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs2/WiredTiger.gz> to 2025-06-06T08:48:03.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs2/storage.bson.gz.0-114> to 2025-06-06T08:48:03.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs2/journal/WiredTigerLog.0000000002.gz> to 2025-06-06T08:48:03.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs2/key.db/WiredTigerLog.0000000001.gz> to 2025-06-06T08:48:03.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs2/key.db/WiredTigerLog.0000000002.gz> to 2025-06-06T08:48:03.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] download stat: buf 1073741824, arena 268435456, span 33554432, spanNum 8, cc 4, [{1 0} {1 0} {1 0} {1 0}] 2025-06-06T08:48:03.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] preparing data 2025-06-06T08:48:06.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] oplogTruncateAfterPoint: {1749198601 5} 2025-06-06T08:48:07.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] recovering oplog as standalone 2025-06-06T08:48:11.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] replaying pitr oplog 2025-06-06T08:48:19.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] uploading ".pbm.restore/2025-06-06T08:46:49.801096496Z/rs.rs2/rs.txn" [size hint: 2 (2.00B); part size: 10485760 (10.00MB)] 2025-06-06T08:48:20.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] clean-up and reset replicaset config 2025-06-06T08:48:24.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] restore on node succeed 2025-06-06T08:48:24.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] moving to state done 2025-06-06T08:48:24.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] uploading ".pbm.restore/2025-06-06T08:46:49.801096496Z/rs.rs2/node.some-name-rs2-0.some-name-rs2.pitr-physical-5149.svc.cluster.local:27017.done" [size hint: 10 (10.00B); part size: 10485760 (10.00MB)] 2025-06-06T08:48:24.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] waiting for `done` status in rs map[.pbm.restore/2025-06-06T08:46:49.801096496Z/rs.rs2/node.some-name-rs2-0.some-name-rs2.pitr-physical-5149.svc.cluster.local:27017:{} .pbm.restore/2025-06-06T08:46:49.801096496Z/rs.rs2/node.some-name-rs2-1.some-name-rs2.pitr-physical-5149.svc.cluster.local:27017:{} .pbm.restore/2025-06-06T08:46:49.801096496Z/rs.rs2/node.some-name-rs2-2.some-name-rs2.pitr-physical-5149.svc.cluster.local:27017:{}] 2025-06-06T08:48:29.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] uploading ".pbm.restore/2025-06-06T08:46:49.801096496Z/rs.rs2/rs.done" [size hint: 10 (10.00B); part size: 10485760 (10.00MB)] 2025-06-06T08:48:29.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] waiting for shards map[.pbm.restore/2025-06-06T08:46:49.801096496Z/rs.cfg/rs:{} .pbm.restore/2025-06-06T08:46:49.801096496Z/rs.rs0/rs:{} .pbm.restore/2025-06-06T08:46:49.801096496Z/rs.rs1/rs:{} .pbm.restore/2025-06-06T08:46:49.801096496Z/rs.rs2/rs:{}] 2025-06-06T08:48:50.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] uploading 
".pbm.restore/2025-06-06T08:46:49.801096496Z/rs.rs2/node.some-name-rs2-0.some-name-rs2.pitr-physical-5149.svc.cluster.local:27017.hb" [size hint: 10 (10.00B); part size: 10485760 (10.00MB)] 2025-06-06T08:48:50.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] uploading ".pbm.restore/2025-06-06T08:46:49.801096496Z/rs.rs2/rs.hb" [size hint: 10 (10.00B); part size: 10485760 (10.00MB)] 2025-06-06T08:48:50.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] uploading ".pbm.restore/2025-06-06T08:46:49.801096496Z/cluster.hb" [size hint: 10 (10.00B); part size: 10485760 (10.00MB)] 2025-06-06T08:49:09.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] uploading ".pbm.restore/2025-06-06T08:46:49.801096496Z/cluster.done" [size hint: 10 (10.00B); part size: 10485760 (10.00MB)] 2025-06-06T08:49:09.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] waiting for cluster 2025-06-06T08:49:14.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] converged to state done 2025-06-06T08:49:14.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] uploading ".pbm.restore/2025-06-06T08:46:49.801096496Z/rs.rs2/stat.some-name-rs2-0.some-name-rs2.pitr-physical-5149.svc.cluster.local:27017" [size hint: 73 (73.00B); part size: 10485760 (10.00MB)] 2025-06-06T08:49:14.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] writing restore meta 2025-06-06T08:49:14.000+0000 W [restore/2025-06-06T08:46:49.801096496Z] meta `.pbm.restore/2025-06-06T08:46:49.801096496Z.json` already exists, trying write done status with '' 2025-06-06T08:49:14.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] rm tmp conf 2025-06-06T08:49:14.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] rm tmp logs 2025-06-06T08:49:14.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] recovery successfully finished 2025-06-06T08:49:14.000+0000 I change stream was closed 2025-06-06T08:49:14.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] hearbeats stopped 2025-06-06T08:49:14.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] uploading ".pbm.restore/2025-06-06T08:46:49.801096496Z/rs.rs2/log/some-name-rs2-0.some-name-rs2.pitr-physical-5149.svc.cluster.local:27017.0.log" [size hint: -1 (unknown); part size: 10485760 (10.00MB)] 2025-06-06T08:49:14.000+0000 D [agentCheckup] deleting agent status 2025-06-06T08:49:14.000+0000 I Exit: 2025/06/06 08:49:14 [entrypoint] `pbm-agent` exited with code 1 2025/06/06 08:49:14 [entrypoint] restart in 5 sec 2025/06/06 08:49:19 [entrypoint] starting `pbm-agent` 2025-06-06T08:49:49.000+0000 E Exit: connect to PBM: create mongo connection: ping: server selection error: server selection timeout, current topology: { Type: Unknown, Servers: [{ Addr: localhost:27017, Type: Unknown, Last error: dial tcp 127.0.0.1:27017: connect: connection refused }, ] } 2025/06/06 08:49:49 [entrypoint] `pbm-agent` exited with code 1 2025/06/06 08:49:49 [entrypoint] restart in 5 sec 2025/06/06 08:49:54 [entrypoint] starting `pbm-agent` 2025-06-06T08:50:24.000+0000 E Exit: connect to PBM: create mongo connection: ping: server selection error: server selection timeout, current topology: { Type: Unknown, Servers: [{ Addr: localhost:27017, Type: Unknown, Last error: dial tcp 127.0.0.1:27017: connect: connection refused }, ] } 2025/06/06 08:50:24 [entrypoint] `pbm-agent` exited with code 1 2025/06/06 08:50:24 [entrypoint] restart in 5 sec 2025/06/06 08:50:29 [entrypoint] starting `pbm-agent` + cat /tmp/tmp.OOSKGW2D9E Defaulted container "mongod" out of: mongod, rs-sidecar-1, mongo-init (init), pbm-init (init) + rm /tmp/tmp.OZgync9GRF 
/tmp/tmp.OOSKGW2D9E
+ return 0
+ for pod in '$(get_mongod_pods ${cluster})'
+ desc 'pbm-agent logs from some-name-rs2-1'
+ set +o xtrace
-----------------------------------------------------------------------------------
pbm-agent logs from some-name-rs2-1
-----------------------------------------------------------------------------------
+ kubectl_bin exec some-name-rs2-1 -- cat /tmp/pbm-agent.log
++ mktemp
+ local LAST_OUT=/tmp/tmp.OjeDNTn4gd
++ mktemp
+ local LAST_ERR=/tmp/tmp.yuGpsajfny
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl exec some-name-rs2-1 -- cat /tmp/pbm-agent.log
+ exit_status=1
+ set -e
+ '[' 1 '!=' 0 -a -n 1 ']'
+ cat /tmp/tmp.OjeDNTn4gd
+ cat /tmp/tmp.yuGpsajfny
Defaulted container "mongod" out of: mongod, rs-sidecar-1, mongo-init (init), pbm-init (init)
error: Internal error occurred: unable to upgrade connection: container not found ("mongod")
+ sleep 0
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl exec some-name-rs2-1 -- cat /tmp/pbm-agent.log
+ exit_status=1
+ set -e
+ '[' 1 '!=' 0 -a -n 1 ']'
+ cat /tmp/tmp.OjeDNTn4gd
+ cat /tmp/tmp.yuGpsajfny
Defaulted container "mongod" out of: mongod, rs-sidecar-1, mongo-init (init), pbm-init (init)
error: Internal error occurred: unable to upgrade connection: container not found ("mongod")
+ sleep 4
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl exec some-name-rs2-1 -- cat /tmp/pbm-agent.log
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.OjeDNTn4gd
2025/06/06 08:50:43 [entrypoint] starting `pbm-agent`
+ cat /tmp/tmp.yuGpsajfny
Defaulted container "mongod" out of: mongod, rs-sidecar-1, mongo-init (init), pbm-init (init)
+ rm /tmp/tmp.OjeDNTn4gd /tmp/tmp.yuGpsajfny
+ return 0
+ for pod in '$(get_mongod_pods ${cluster})'
+ desc 'pbm-agent logs from some-name-rs2-2'
+ set +o xtrace
-----------------------------------------------------------------------------------
pbm-agent logs from some-name-rs2-2
-----------------------------------------------------------------------------------
+ kubectl_bin exec some-name-rs2-2 -- cat /tmp/pbm-agent.log
++ mktemp
+ local LAST_OUT=/tmp/tmp.oueVqEsfTX
++ mktemp
+ local LAST_ERR=/tmp/tmp.jlMdTrQKAI
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl exec some-name-rs2-2 -- cat /tmp/pbm-agent.log
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
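[editor's note] The two 'container not found ("mongod")' failures for some-name-rs2-1 above are expected while a physical restore is in flight — the mongod container is being restarted under the pod — and the harness absorbs them with the three-attempt retry visible in the trace (seq 0 2, then sleep 0 and sleep 4). A sketch of that wrapper, reconstructed from the xtrace; the real helper lives in the suite's shared functions file, so anything not visible in the trace (including the simplified failure guard) is an assumption. The pbm-agent entrypoint applies the same idea one level down: on exit code 1 it restarts the agent after 5 seconds, which is why the connection-refused exits in these dumps are transient.

# Reconstructed from the xtrace; details beyond the trace are assumptions.
kubectl_bin() {
  local LAST_OUT LAST_ERR exit_status=0 timeout=4 i
  LAST_OUT=$(mktemp)
  LAST_ERR=$(mktemp)
  for i in $(seq 0 2); do
    set +e
    kubectl "$@" >"$LAST_OUT" 2>"$LAST_ERR"
    exit_status=$?
    set -e
    if [ "$exit_status" != 0 ]; then
      cat "$LAST_OUT"
      cat "$LAST_ERR"
      sleep $((timeout * i))   # 0s, then 4s -- the "sleep 0" / "sleep 4" above
    else
      break
    fi
  done
  cat "$LAST_OUT"
  cat "$LAST_ERR"
  rm "$LAST_OUT" "$LAST_ERR"
  return "$exit_status"
}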
+ cat /tmp/tmp.oueVqEsfTX
2025/06/06 08:44:25 [entrypoint] starting `pbm-agent`
2025-06-06T08:44:29.000+0000 I [pbm-agent startup banner omitted: Percona ASCII-art logo and "Join Percona Squad!" invitation, squad.percona.com/mongodb]
2025-06-06T08:44:29.000+0000 I log options: log-path=/dev/stderr, log-level:D, log-json:false
2025-06-06T08:44:29.000+0000 I pbm-agent: Version: 2.9.1 Platform: linux/amd64 GitCommit: 1f8d60145ed5e7b834c6d7c5318446071bc04ecb GitBranch: release-2.9.1 BuildTime: 2025-03-25_11:35_UTC GoVersion: go1.22.8
2025-06-06T08:44:29.000+0000 I starting PITR routine
2025-06-06T08:44:29.000+0000 I node: rs2/some-name-rs2-2.some-name-rs2.pitr-physical-5149.svc.cluster.local:27017
2025-06-06T08:44:29.000+0000 I conn level ReadConcern: majority; WriteConcern: majority
2025-06-06T08:44:29.000+0000 I listening for the commands
2025-06-06T08:46:50.000+0000 I got command restore [name: 2025-06-06T08:46:49.801096496Z, snapshot: 2025-06-06T08:29:58Z point-in-time: <1749198759,0>] , opid: 6842aaf9fb2c99b9a202e303
2025-06-06T08:46:50.000+0000 I got epoch {1749199610 54}
2025-06-06T08:46:50.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] to time: 2025-06-06T08:32:39Z
2025-06-06T08:46:50.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] backup: 2025-06-06T08:29:58Z
2025-06-06T08:46:50.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] recovery started
2025-06-06T08:46:50.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] port: 27904
2025-06-06T08:46:50.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] uploading ".pbm.restore/2025-06-06T08:46:49.801096496Z/rs.rs2/node.some-name-rs2-2.some-name-rs2.pitr-physical-5149.svc.cluster.local:27017.hb" [size hint: 10 (10.00B); part size: 10485760 (10.00MB)]
2025-06-06T08:46:50.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] uploading ".pbm.restore/2025-06-06T08:46:49.801096496Z/rs.rs2/rs.hb" [size hint: 10 (10.00B); part size: 10485760 (10.00MB)]
2025-06-06T08:46:50.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] uploading ".pbm.restore/2025-06-06T08:46:49.801096496Z/cluster.hb" [size hint: 10 (10.00B); part size: 10485760 (10.00MB)]
2025-06-06T08:46:50.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] mongod binary: mongod, version: v7.0.18-11
2025-06-06T08:46:50.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] moving to state starting
2025-06-06T08:46:50.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] uploading ".pbm.restore/2025-06-06T08:46:49.801096496Z/rs.rs2/node.some-name-rs2-2.some-name-rs2.pitr-physical-5149.svc.cluster.local:27017.starting" [size hint: 10 (10.00B); part size: 10485760 (10.00MB)]
2025-06-06T08:46:50.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] waiting for cluster
2025-06-06T08:47:05.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] converged to state starting
2025-06-06T08:47:05.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] starting
2025-06-06T08:47:05.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] moving to state running
2025-06-06T08:47:05.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] uploading ".pbm.restore/2025-06-06T08:46:49.801096496Z/rs.rs2/node.some-name-rs2-2.some-name-rs2.pitr-physical-5149.svc.cluster.local:27017.running" [size hint: 10 (10.00B); part size: 10485760 (10.00MB)]
2025-06-06T08:47:05.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] waiting for cluster
2025-06-06T08:47:20.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] converged to state running
2025-06-06T08:47:20.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] send to stopAgent chan
2025-06-06T08:47:21.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] stop agents heartbeats
2025-06-06T08:47:21.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] stopping mongod and
flushing old data 2025-06-06T08:47:21.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] shutdown server 2025-06-06T08:47:37.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] waiting for the node to shutdown 2025-06-06T08:47:38.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove old data 2025-06-06T08:47:38.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/collection-18--4640858817726418061.wt 2025-06-06T08:47:38.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/lost+found 2025-06-06T08:47:38.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/collection-8--4640858817726418061.wt 2025-06-06T08:47:38.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/index-1--4640858817726418061.wt 2025-06-06T08:47:38.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/_mdb_catalog.wt 2025-06-06T08:47:38.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/collection-56--4640858817726418061.wt 2025-06-06T08:47:38.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/collection-0--4640858817726418061.wt 2025-06-06T08:47:38.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/WiredTiger.lock 2025-06-06T08:47:38.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/index-36--4640858817726418061.wt 2025-06-06T08:47:38.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/index-55--4640858817726418061.wt 2025-06-06T08:47:38.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/collection-43--4640858817726418061.wt 2025-06-06T08:47:38.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/index-59--4640858817726418061.wt 2025-06-06T08:47:38.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/collection-19--4640858817726418061.wt 2025-06-06T08:47:38.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/index-51--4640858817726418061.wt 2025-06-06T08:47:38.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/WiredTiger 2025-06-06T08:47:38.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/collection-29--4640858817726418061.wt 2025-06-06T08:47:38.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/key.db 2025-06-06T08:47:38.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/collection-37--4640858817726418061.wt 2025-06-06T08:47:38.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/index-13--4640858817726418061.wt 2025-06-06T08:47:38.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/storage.bson 2025-06-06T08:47:38.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/index-7--4640858817726418061.wt 2025-06-06T08:47:38.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/collection-34--4640858817726418061.wt 2025-06-06T08:47:38.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/collection-12--4640858817726418061.wt 2025-06-06T08:47:38.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/index-20--4640858817726418061.wt 2025-06-06T08:47:38.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/collection-27--4640858817726418061.wt 2025-06-06T08:47:38.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/collection-58--4640858817726418061.wt 2025-06-06T08:47:38.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/index-63--4640858817726418061.wt 2025-06-06T08:47:38.000+0000 D 
[restore/2025-06-06T08:46:49.801096496Z] remove /data/db/mongod.lock 2025-06-06T08:47:38.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/diagnostic.data 2025-06-06T08:47:38.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/index-48--4640858817726418061.wt 2025-06-06T08:47:38.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/collection-50--4640858817726418061.wt 2025-06-06T08:47:38.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/index-33--4640858817726418061.wt 2025-06-06T08:47:38.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/journal 2025-06-06T08:47:38.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/index-25--4640858817726418061.wt 2025-06-06T08:47:38.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/index-41--4640858817726418061.wt 2025-06-06T08:47:38.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/index-28--4640858817726418061.wt 2025-06-06T08:47:38.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/collection-6--4640858817726418061.wt 2025-06-06T08:47:38.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/index-11--4640858817726418061.wt 2025-06-06T08:47:38.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/collection-4--4640858817726418061.wt 2025-06-06T08:47:38.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/collection-40--4640858817726418061.wt 2025-06-06T08:47:38.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/index-21--4640858817726418061.wt 2025-06-06T08:47:38.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/collection-46--4640858817726418061.wt 2025-06-06T08:47:38.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/collection-14--4640858817726418061.wt 2025-06-06T08:47:38.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/collection-47--4640858817726418061.wt 2025-06-06T08:47:38.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/collection-53--4640858817726418061.wt 2025-06-06T08:47:38.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/collection-2--4640858817726418061.wt 2025-06-06T08:47:38.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/index-30--4640858817726418061.wt 2025-06-06T08:47:38.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/index-44--4640858817726418061.wt 2025-06-06T08:47:38.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/index-42--4640858817726418061.wt 2025-06-06T08:47:38.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/index-61--4640858817726418061.wt 2025-06-06T08:47:38.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/index-32--4640858817726418061.wt 2025-06-06T08:47:38.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/WiredTiger.wt 2025-06-06T08:47:38.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/index-64--4640858817726418061.wt 2025-06-06T08:47:38.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/WiredTigerHS.wt 2025-06-06T08:47:38.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/index-57--4640858817726418061.wt 2025-06-06T08:47:38.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/collection-31--4640858817726418061.wt 2025-06-06T08:47:38.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/sizeStorer.wt 
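[editor's note] The "remove old data" pass (continuing below) empties the dbpath entry-by-entry — collections, indexes, WiredTiger metadata, key.db, journal, even lost+found — while keeping the mount point itself in place. A minimal sketch of the same clean-before-copy idea; /data/db is the dbpath this log shows, but the command is an illustration, not PBM's actual code:

dbpath=/data/db
# Delete everything inside the dbpath without removing the mount point.
find "$dbpath" -mindepth 1 -maxdepth 1 -exec rm -rf -- {} +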
2025-06-06T08:47:38.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/collection-10--4640858817726418061.wt 2025-06-06T08:47:38.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/collection-22--4640858817726418061.wt 2025-06-06T08:47:38.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/index-49--4640858817726418061.wt 2025-06-06T08:47:38.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/collection-62--4640858817726418061.wt 2025-06-06T08:47:38.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/index-52--4640858817726418061.wt 2025-06-06T08:47:38.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/index-35--4640858817726418061.wt 2025-06-06T08:47:38.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/index-9--4640858817726418061.wt 2025-06-06T08:47:38.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/WiredTiger.turtle 2025-06-06T08:47:38.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/index-39--4640858817726418061.wt 2025-06-06T08:47:38.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/index-3--4640858817726418061.wt 2025-06-06T08:47:38.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/index-54--4640858817726418061.wt 2025-06-06T08:47:38.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/index-45--4640858817726418061.wt 2025-06-06T08:47:38.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/index-23--4640858817726418061.wt 2025-06-06T08:47:38.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/psmdb_telemetry.data 2025-06-06T08:47:38.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/collection-24--4640858817726418061.wt 2025-06-06T08:47:38.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/index-5--4640858817726418061.wt 2025-06-06T08:47:38.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/index-26--4640858817726418061.wt 2025-06-06T08:47:38.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/index-38--4640858817726418061.wt 2025-06-06T08:47:38.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/collection-60--4640858817726418061.wt 2025-06-06T08:47:38.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/mongod-data 2025-06-06T08:47:38.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] remove /data/db/index-15--4640858817726418061.wt 2025-06-06T08:47:38.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copying backup data 2025-06-06T08:47:38.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] download max buf 1073741824 (arena 268435456, span 33554432, concurrency 4) 2025-06-06T08:47:38.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs2/key.db/WiredTigerLog.0000000001.gz> to 2025-06-06T08:47:38.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs2/key.db/WiredTigerHS.wt.gz> to 2025-06-06T08:47:38.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs2/key.db/key.wt.gz> to 2025-06-06T08:47:38.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs2/key.db/parameters.wt.gz> to 2025-06-06T08:47:38.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs2/key.db/WiredTiger.backup.gz> to 2025-06-06T08:47:38.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs2/key.db/WiredTiger.gz> to 
2025-06-06T08:47:38.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs2/journal/WiredTigerLog.0000000001.gz> to 2025-06-06T08:47:38.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs2/WiredTigerHS.wt.gz> to 2025-06-06T08:47:38.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs2/_mdb_catalog.wt.gz> to 2025-06-06T08:47:38.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs2/collection-0-7255838504681023155.wt.gz> to 2025-06-06T08:47:38.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs2/collection-10-7255838504681023155.wt.gz> to 2025-06-06T08:47:38.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs2/collection-12-7255838504681023155.wt.gz> to 2025-06-06T08:47:38.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs2/collection-14-7255838504681023155.wt.gz> to 2025-06-06T08:47:38.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs2/collection-18-7255838504681023155.wt.gz> to 2025-06-06T08:47:38.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs2/collection-19-7255838504681023155.wt.gz> to 2025-06-06T08:47:38.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs2/collection-2-7255838504681023155.wt.gz> to 2025-06-06T08:47:38.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs2/collection-22-7255838504681023155.wt.gz> to 2025-06-06T08:47:38.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs2/collection-24-7255838504681023155.wt.gz> to 2025-06-06T08:47:38.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs2/collection-27-7255838504681023155.wt.gz> to 2025-06-06T08:47:38.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs2/collection-29-7255838504681023155.wt.gz> to 2025-06-06T08:47:38.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs2/collection-31-7255838504681023155.wt.gz> to 2025-06-06T08:47:38.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs2/collection-34-7255838504681023155.wt.gz> to 2025-06-06T08:47:38.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs2/collection-37-7255838504681023155.wt.gz> to 2025-06-06T08:47:38.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs2/collection-4-7255838504681023155.wt.gz> to 2025-06-06T08:47:38.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs2/collection-40-7255838504681023155.wt.gz> to 2025-06-06T08:47:38.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs2/collection-43-7255838504681023155.wt.gz> to 2025-06-06T08:47:39.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs2/collection-46-7255838504681023155.wt.gz> to 2025-06-06T08:47:39.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs2/collection-47-7255838504681023155.wt.gz> to 2025-06-06T08:47:39.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs2/collection-50-7255838504681023155.wt.gz> to 2025-06-06T08:47:39.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs2/collection-53-7255838504681023155.wt.gz> to 2025-06-06T08:47:39.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] 
copy <2025-06-06T08:29:58Z/rs2/collection-56-7255838504681023155.wt.gz> to 2025-06-06T08:47:39.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs2/collection-58-7255838504681023155.wt.gz> to 2025-06-06T08:47:39.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs2/collection-6-7255838504681023155.wt.gz> to 2025-06-06T08:47:39.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs2/collection-60-7255838504681023155.wt.gz> to 2025-06-06T08:47:39.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs2/collection-62-7255838504681023155.wt.gz> to 2025-06-06T08:47:39.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs2/collection-8-7255838504681023155.wt.gz> to 2025-06-06T08:47:39.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs2/index-1-7255838504681023155.wt.gz> to 2025-06-06T08:47:39.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs2/index-11-7255838504681023155.wt.gz> to 2025-06-06T08:47:39.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs2/index-13-7255838504681023155.wt.gz> to 2025-06-06T08:47:39.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs2/index-15-7255838504681023155.wt.gz> to 2025-06-06T08:47:39.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs2/index-20-7255838504681023155.wt.gz> to 2025-06-06T08:47:39.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs2/index-21-7255838504681023155.wt.gz> to 2025-06-06T08:47:39.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs2/index-23-7255838504681023155.wt.gz> to 2025-06-06T08:47:39.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs2/index-25-7255838504681023155.wt.gz> to 2025-06-06T08:47:39.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs2/index-26-7255838504681023155.wt.gz> to 2025-06-06T08:47:39.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs2/index-28-7255838504681023155.wt.gz> to 2025-06-06T08:47:39.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs2/index-3-7255838504681023155.wt.gz> to 2025-06-06T08:47:39.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs2/index-30-7255838504681023155.wt.gz> to 2025-06-06T08:47:39.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs2/index-32-7255838504681023155.wt.gz> to 2025-06-06T08:47:39.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs2/index-33-7255838504681023155.wt.gz> to 2025-06-06T08:47:39.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs2/index-35-7255838504681023155.wt.gz> to 2025-06-06T08:47:39.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs2/index-36-7255838504681023155.wt.gz> to 2025-06-06T08:47:39.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs2/index-38-7255838504681023155.wt.gz> to 2025-06-06T08:47:39.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs2/index-39-7255838504681023155.wt.gz> to 2025-06-06T08:47:39.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs2/index-41-7255838504681023155.wt.gz> to 2025-06-06T08:47:39.000+0000 I 
[restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs2/index-42-7255838504681023155.wt.gz> to 2025-06-06T08:47:39.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs2/index-44-7255838504681023155.wt.gz> to 2025-06-06T08:47:39.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs2/index-45-7255838504681023155.wt.gz> to 2025-06-06T08:47:39.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs2/index-48-7255838504681023155.wt.gz> to 2025-06-06T08:47:39.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs2/index-49-7255838504681023155.wt.gz> to 2025-06-06T08:47:39.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs2/index-5-7255838504681023155.wt.gz> to 2025-06-06T08:47:39.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs2/index-51-7255838504681023155.wt.gz> to 2025-06-06T08:47:39.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs2/index-52-7255838504681023155.wt.gz> to 2025-06-06T08:47:39.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs2/index-54-7255838504681023155.wt.gz> to 2025-06-06T08:47:39.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs2/index-55-7255838504681023155.wt.gz> to 2025-06-06T08:47:39.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs2/index-57-7255838504681023155.wt.gz> to 2025-06-06T08:47:39.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs2/index-59-7255838504681023155.wt.gz> to 2025-06-06T08:47:39.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs2/index-61-7255838504681023155.wt.gz> to 2025-06-06T08:47:39.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs2/index-63-7255838504681023155.wt.gz> to 2025-06-06T08:47:39.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs2/index-64-7255838504681023155.wt.gz> to 2025-06-06T08:47:39.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs2/index-7-7255838504681023155.wt.gz> to 2025-06-06T08:47:39.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs2/index-9-7255838504681023155.wt.gz> to 2025-06-06T08:47:39.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs2/sizeStorer.wt.gz> to 2025-06-06T08:47:39.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs2/WiredTiger.backup.gz> to 2025-06-06T08:47:39.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs2/WiredTiger.gz> to 2025-06-06T08:47:39.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs2/storage.bson.gz.0-114> to 2025-06-06T08:47:39.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs2/journal/WiredTigerLog.0000000002.gz> to 2025-06-06T08:47:39.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs2/key.db/WiredTigerLog.0000000001.gz> to 2025-06-06T08:47:39.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] copy <2025-06-06T08:29:58Z/rs2/key.db/WiredTigerLog.0000000002.gz> to 2025-06-06T08:47:39.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] download stat: buf 1073741824, arena 268435456, span 33554432, spanNum 8, cc 4, [{1 0} {1 0} {1 0} {1 0}] 2025-06-06T08:47:40.000+0000 I 
[restore/2025-06-06T08:46:49.801096496Z] preparing data 2025-06-06T08:47:43.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] oplogTruncateAfterPoint: {1749198601 5} 2025-06-06T08:47:44.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] recovering oplog as standalone 2025-06-06T08:47:47.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] clean-up and reset replicaset config 2025-06-06T08:47:51.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] restore on node succeed 2025-06-06T08:47:51.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] moving to state done 2025-06-06T08:47:51.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] uploading ".pbm.restore/2025-06-06T08:46:49.801096496Z/rs.rs2/node.some-name-rs2-2.some-name-rs2.pitr-physical-5149.svc.cluster.local:27017.done" [size hint: 10 (10.00B); part size: 10485760 (10.00MB)] 2025-06-06T08:47:51.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] waiting for `done` status in rs map[.pbm.restore/2025-06-06T08:46:49.801096496Z/rs.rs2/node.some-name-rs2-0.some-name-rs2.pitr-physical-5149.svc.cluster.local:27017:{} .pbm.restore/2025-06-06T08:46:49.801096496Z/rs.rs2/node.some-name-rs2-1.some-name-rs2.pitr-physical-5149.svc.cluster.local:27017:{} .pbm.restore/2025-06-06T08:46:49.801096496Z/rs.rs2/node.some-name-rs2-2.some-name-rs2.pitr-physical-5149.svc.cluster.local:27017:{}] 2025-06-06T08:48:26.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] uploading ".pbm.restore/2025-06-06T08:46:49.801096496Z/rs.rs2/rs.done" [size hint: 10 (10.00B); part size: 10485760 (10.00MB)] 2025-06-06T08:48:26.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] waiting for shards map[.pbm.restore/2025-06-06T08:46:49.801096496Z/rs.cfg/rs:{} .pbm.restore/2025-06-06T08:46:49.801096496Z/rs.rs0/rs:{} .pbm.restore/2025-06-06T08:46:49.801096496Z/rs.rs1/rs:{} .pbm.restore/2025-06-06T08:46:49.801096496Z/rs.rs2/rs:{}] 2025-06-06T08:48:50.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] uploading ".pbm.restore/2025-06-06T08:46:49.801096496Z/rs.rs2/node.some-name-rs2-2.some-name-rs2.pitr-physical-5149.svc.cluster.local:27017.hb" [size hint: 10 (10.00B); part size: 10485760 (10.00MB)] 2025-06-06T08:48:50.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] uploading ".pbm.restore/2025-06-06T08:46:49.801096496Z/rs.rs2/rs.hb" [size hint: 10 (10.00B); part size: 10485760 (10.00MB)] 2025-06-06T08:48:50.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] uploading ".pbm.restore/2025-06-06T08:46:49.801096496Z/cluster.hb" [size hint: 10 (10.00B); part size: 10485760 (10.00MB)] 2025-06-06T08:49:11.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] uploading ".pbm.restore/2025-06-06T08:46:49.801096496Z/cluster.done" [size hint: 10 (10.00B); part size: 10485760 (10.00MB)] 2025-06-06T08:49:11.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] waiting for cluster 2025-06-06T08:49:16.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] converged to state done 2025-06-06T08:49:16.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] uploading ".pbm.restore/2025-06-06T08:46:49.801096496Z/rs.rs2/stat.some-name-rs2-2.some-name-rs2.pitr-physical-5149.svc.cluster.local:27017" [size hint: 73 (73.00B); part size: 10485760 (10.00MB)] 2025-06-06T08:49:16.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] writing restore meta 2025-06-06T08:49:16.000+0000 W [restore/2025-06-06T08:46:49.801096496Z] meta `.pbm.restore/2025-06-06T08:46:49.801096496Z.json` already exists, trying write done status with '' 2025-06-06T08:49:16.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] rm tmp conf 
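[editor's note] The sequence above is PBM's hierarchical convergence for a distributed restore (the teardown entries continue below): each node uploads node.<host>.done and waits for the whole replica set ("waiting for `done` status in rs map[...]"), the replica set uploads rs.done and waits for all shards including the config replica set ("waiting for shards map[... rs.cfg ... rs.rs0 ... rs.rs1 ... rs.rs2 ...]"), and finally cluster.done is written and all participants report "converged to state done"; the .hb files refreshed in between are heartbeats proving the participants are still alive. A sketch of that wait-for-sentinels pattern; storage_ls is a hypothetical stand-in for an object-existence check against the backup storage (PBM does this through its storage drivers, not a shell):

# storage_ls <key> is assumed to succeed iff the object exists in the
# backup storage (think `mc stat` / `aws s3api head-object`).
wait_for_sentinels() {
  local prefix=$1; shift   # e.g. .pbm.restore/<restore-name>/rs.rs2
  local name missing
  while :; do
    missing=0
    for name in "$@"; do   # e.g. node.<host>:27017.done, or rs.done per shard
      storage_ls "$prefix/$name" >/dev/null 2>&1 || missing=1
    done
    [ "$missing" = 0 ] && break
    sleep 5                # the poll interval is an assumption
  done
}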
2025-06-06T08:49:16.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] rm tmp logs
2025-06-06T08:49:16.000+0000 I [restore/2025-06-06T08:46:49.801096496Z] recovery successfully finished
2025-06-06T08:49:16.000+0000 I change stream was closed
2025-06-06T08:49:16.000+0000 D [restore/2025-06-06T08:46:49.801096496Z] uploading ".pbm.restore/2025-06-06T08:46:49.801096496Z/rs.rs2/log/some-name-rs2-2.some-name-rs2.pitr-physical-5149.svc.cluster.local:27017.0.log" [size hint: -1 (unknown); part size: 10485760 (10.00MB)]
2025-06-06T08:49:16.000+0000 I Exit:
2025/06/06 08:49:16 [entrypoint] `pbm-agent` exited with code 1
2025/06/06 08:49:16 [entrypoint] restart in 5 sec
2025/06/06 08:49:21 [entrypoint] starting `pbm-agent`
2025-06-06T08:49:51.000+0000 E Exit: connect to PBM: create mongo connection: ping: server selection error: server selection timeout, current topology: { Type: Unknown, Servers: [{ Addr: localhost:27017, Type: Unknown, Last error: dial tcp 127.0.0.1:27017: connect: connection refused }, ] }
2025/06/06 08:49:51 [entrypoint] `pbm-agent` exited with code 1
2025/06/06 08:49:51 [entrypoint] restart in 5 sec
2025/06/06 08:49:56 [entrypoint] starting `pbm-agent`
2025-06-06T08:50:26.000+0000 E Exit: connect to PBM: create mongo connection: ping: server selection error: server selection timeout, current topology: { Type: Unknown, Servers: [{ Addr: localhost:27017, Type: Unknown, Last error: dial tcp 127.0.0.1:27017: connect: connection refused }, ] }
2025/06/06 08:50:26 [entrypoint] `pbm-agent` exited with code 1
2025/06/06 08:50:26 [entrypoint] restart in 5 sec
2025/06/06 08:50:31 [entrypoint] starting `pbm-agent`
+ cat /tmp/tmp.jlMdTrQKAI
Defaulted container "mongod" out of: mongod, rs-sidecar-1, mongo-init (init), pbm-init (init)
+ rm /tmp/tmp.oueVqEsfTX /tmp/tmp.jlMdTrQKAI
+ return 0
+ grep restore-backup-minio-2
+ tail -100
++ get_operator_pod
++ kubectl_bin get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator
+++ mktemp
++ local LAST_OUT=/tmp/tmp.FlZWmQaBUG
+++ mktemp
++ local LAST_ERR=/tmp/tmp.BwReVGEj4Q
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in '$(seq 0 2)'
++ set +e
++ kubectl get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 -a -n 1 ']'
++ break
++ cat /tmp/tmp.FlZWmQaBUG
++ cat /tmp/tmp.BwReVGEj4Q
++ rm /tmp/tmp.FlZWmQaBUG /tmp/tmp.BwReVGEj4Q
++ return 0
+ kubectl_bin logs -n psmdb-operator percona-server-mongodb-operator-d49c7dc6d-bqg5z
++ mktemp
+ local LAST_OUT=/tmp/tmp.yHgktJGSDp
++ mktemp
+ local LAST_ERR=/tmp/tmp.Nn3WMuakJl
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl logs -n psmdb-operator percona-server-mongodb-operator-d49c7dc6d-bqg5z
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.yHgktJGSDp
+ cat /tmp/tmp.Nn3WMuakJl
+ rm /tmp/tmp.yHgktJGSDp /tmp/tmp.Nn3WMuakJl
+ return 0
2025-06-06T08:48:28.329Z DEBUG PBM restore status {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-2","namespace":"pitr-physical-5149"}, "namespace": "pitr-physical-5149", "name": "restore-backup-minio-2", "reconcileID": "83e1173a-cedb-43fb-9ebb-45553bdde3ac", "status":
{"type":"physical","opid":"","name":"2025-06-06T08:46:49.801096496Z","replsets":[{"name":"rs2","start_ts":0,"status":"done","last_transition_ts":1749199706,"first_write_ts":{"T":0,"I":0},"last_write_ts":{"T":0,"I":0},"node":"","conditions":null},{"name":"cfg","start_ts":0,"status":"running","last_transition_ts":1749199630,"first_write_ts":{"T":0,"I":0},"last_write_ts":{"T":0,"I":0},"node":"","conditions":null},{"name":"rs0","start_ts":0,"status":"done","last_transition_ts":1749199703,"first_write_ts":{"T":0,"I":0},"last_write_ts":{"T":0,"I":0},"node":"","conditions":null},{"name":"rs1","start_ts":0,"status":"done","last_transition_ts":1749199706,"first_write_ts":{"T":0,"I":0},"last_write_ts":{"T":0,"I":0},"node":"","conditions":null}],"compression":"","store":{"type":""},"size":0,"mongodb_version":"","fcv":"","start_ts":0,"last_transition_ts":1749199635,"first_write_ts":{"T":0,"I":0},"last_write_ts":{"T":0,"I":0},"hb":{"T":0,"I":0},"status":"running","conditions":null,"n":null,"pbm_version":"","balancer":""}} 2025-06-06T08:48:33.330Z DEBUG checking if restore is allowed {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-2","namespace":"pitr-physical-5149"}, "namespace": "pitr-physical-5149", "name": "restore-backup-minio-2", "reconcileID": "4682f6ee-1f82-408e-a747-25098b02ec6e", "cluster": "some-name", "namespace": "pitr-physical-5149"} 2025-06-06T08:48:33.332Z DEBUG Pod is ready for physical restore {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-2","namespace":"pitr-physical-5149"}, "namespace": "pitr-physical-5149", "name": "restore-backup-minio-2", "reconcileID": "4682f6ee-1f82-408e-a747-25098b02ec6e", "pod": "some-name-rs0-0"} 2025-06-06T08:48:33.332Z DEBUG Pod is ready for physical restore {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-2","namespace":"pitr-physical-5149"}, "namespace": "pitr-physical-5149", "name": "restore-backup-minio-2", "reconcileID": "4682f6ee-1f82-408e-a747-25098b02ec6e", "pod": "some-name-rs0-2"} 2025-06-06T08:48:33.332Z DEBUG Pod is ready for physical restore {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-2","namespace":"pitr-physical-5149"}, "namespace": "pitr-physical-5149", "name": "restore-backup-minio-2", "reconcileID": "4682f6ee-1f82-408e-a747-25098b02ec6e", "pod": "some-name-rs0-1"} 2025-06-06T08:48:33.332Z DEBUG Statefulset is ready for physical restore {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-2","namespace":"pitr-physical-5149"}, "namespace": "pitr-physical-5149", "name": "restore-backup-minio-2", "reconcileID": "4682f6ee-1f82-408e-a747-25098b02ec6e", "sts": "some-name-rs0", "replset": "rs0"} 2025-06-06T08:48:33.332Z DEBUG Pod is ready for physical restore {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": 
{"name":"restore-backup-minio-2","namespace":"pitr-physical-5149"}, "namespace": "pitr-physical-5149", "name": "restore-backup-minio-2", "reconcileID": "4682f6ee-1f82-408e-a747-25098b02ec6e", "pod": "some-name-rs1-1"} 2025-06-06T08:48:33.332Z DEBUG Pod is ready for physical restore {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-2","namespace":"pitr-physical-5149"}, "namespace": "pitr-physical-5149", "name": "restore-backup-minio-2", "reconcileID": "4682f6ee-1f82-408e-a747-25098b02ec6e", "pod": "some-name-rs1-2"} 2025-06-06T08:48:33.332Z DEBUG Pod is ready for physical restore {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-2","namespace":"pitr-physical-5149"}, "namespace": "pitr-physical-5149", "name": "restore-backup-minio-2", "reconcileID": "4682f6ee-1f82-408e-a747-25098b02ec6e", "pod": "some-name-rs1-0"} 2025-06-06T08:48:33.332Z DEBUG Statefulset is ready for physical restore {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-2","namespace":"pitr-physical-5149"}, "namespace": "pitr-physical-5149", "name": "restore-backup-minio-2", "reconcileID": "4682f6ee-1f82-408e-a747-25098b02ec6e", "sts": "some-name-rs1", "replset": "rs1"} 2025-06-06T08:48:33.332Z DEBUG Pod is ready for physical restore {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-2","namespace":"pitr-physical-5149"}, "namespace": "pitr-physical-5149", "name": "restore-backup-minio-2", "reconcileID": "4682f6ee-1f82-408e-a747-25098b02ec6e", "pod": "some-name-rs2-0"} 2025-06-06T08:48:33.332Z DEBUG Pod is ready for physical restore {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-2","namespace":"pitr-physical-5149"}, "namespace": "pitr-physical-5149", "name": "restore-backup-minio-2", "reconcileID": "4682f6ee-1f82-408e-a747-25098b02ec6e", "pod": "some-name-rs2-1"} 2025-06-06T08:48:33.332Z DEBUG Pod is ready for physical restore {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-2","namespace":"pitr-physical-5149"}, "namespace": "pitr-physical-5149", "name": "restore-backup-minio-2", "reconcileID": "4682f6ee-1f82-408e-a747-25098b02ec6e", "pod": "some-name-rs2-2"} 2025-06-06T08:48:33.332Z DEBUG Statefulset is ready for physical restore {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-2","namespace":"pitr-physical-5149"}, "namespace": "pitr-physical-5149", "name": "restore-backup-minio-2", "reconcileID": "4682f6ee-1f82-408e-a747-25098b02ec6e", "sts": "some-name-rs2", "replset": "rs2"} 2025-06-06T08:48:33.332Z DEBUG Statefulset is ready for physical restore {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": 
"PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-2","namespace":"pitr-physical-5149"}, "namespace": "pitr-physical-5149", "name": "restore-backup-minio-2", "reconcileID": "4682f6ee-1f82-408e-a747-25098b02ec6e", "sts": "some-name-cfg", "replset": "cfg"} 2025-06-06T08:48:33.332Z DEBUG Check restore status {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-2","namespace":"pitr-physical-5149"}, "namespace": "pitr-physical-5149", "name": "restore-backup-minio-2", "reconcileID": "4682f6ee-1f82-408e-a747-25098b02ec6e", "command": ["/opt/percona/pbm", "describe-restore", "2025-06-06T08:46:49.801096496Z", "--config", "/etc/pbm/pbm_config.yaml", "--out", "json"], "pod": "some-name-rs0-0"} 2025-06-06T08:48:33.867Z DEBUG PBM restore status {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-2","namespace":"pitr-physical-5149"}, "namespace": "pitr-physical-5149", "name": "restore-backup-minio-2", "reconcileID": "4682f6ee-1f82-408e-a747-25098b02ec6e", "status": {"type":"physical","opid":"","name":"2025-06-06T08:46:49.801096496Z","replsets":[{"name":"cfg","start_ts":0,"status":"running","last_transition_ts":1749199630,"first_write_ts":{"T":0,"I":0},"last_write_ts":{"T":0,"I":0},"node":"","conditions":null},{"name":"rs0","start_ts":0,"status":"done","last_transition_ts":1749199703,"first_write_ts":{"T":0,"I":0},"last_write_ts":{"T":0,"I":0},"node":"","conditions":null},{"name":"rs1","start_ts":0,"status":"done","last_transition_ts":1749199709,"first_write_ts":{"T":0,"I":0},"last_write_ts":{"T":0,"I":0},"node":"","conditions":null},{"name":"rs2","start_ts":0,"status":"done","last_transition_ts":1749199709,"first_write_ts":{"T":0,"I":0},"last_write_ts":{"T":0,"I":0},"node":"","conditions":null}],"compression":"","store":{"type":""},"size":0,"mongodb_version":"","fcv":"","start_ts":0,"last_transition_ts":1749199635,"first_write_ts":{"T":0,"I":0},"last_write_ts":{"T":0,"I":0},"hb":{"T":0,"I":0},"status":"running","conditions":null,"n":null,"pbm_version":"","balancer":""}} 2025-06-06T08:48:38.868Z DEBUG checking if restore is allowed {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-2","namespace":"pitr-physical-5149"}, "namespace": "pitr-physical-5149", "name": "restore-backup-minio-2", "reconcileID": "2faab9e6-d4c7-460f-a1d1-f83288f91f73", "cluster": "some-name", "namespace": "pitr-physical-5149"} 2025-06-06T08:48:38.870Z DEBUG Pod is ready for physical restore {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-2","namespace":"pitr-physical-5149"}, "namespace": "pitr-physical-5149", "name": "restore-backup-minio-2", "reconcileID": "2faab9e6-d4c7-460f-a1d1-f83288f91f73", "pod": "some-name-rs0-2"} 2025-06-06T08:48:38.870Z DEBUG Pod is ready for physical restore {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-2","namespace":"pitr-physical-5149"}, "namespace": 
"pitr-physical-5149", "name": "restore-backup-minio-2", "reconcileID": "2faab9e6-d4c7-460f-a1d1-f83288f91f73", "pod": "some-name-rs0-1"} 2025-06-06T08:48:38.870Z DEBUG Pod is ready for physical restore {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-2","namespace":"pitr-physical-5149"}, "namespace": "pitr-physical-5149", "name": "restore-backup-minio-2", "reconcileID": "2faab9e6-d4c7-460f-a1d1-f83288f91f73", "pod": "some-name-rs0-0"} 2025-06-06T08:48:38.870Z DEBUG Statefulset is ready for physical restore {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-2","namespace":"pitr-physical-5149"}, "namespace": "pitr-physical-5149", "name": "restore-backup-minio-2", "reconcileID": "2faab9e6-d4c7-460f-a1d1-f83288f91f73", "sts": "some-name-rs0", "replset": "rs0"} 2025-06-06T08:48:38.870Z DEBUG Pod is ready for physical restore {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-2","namespace":"pitr-physical-5149"}, "namespace": "pitr-physical-5149", "name": "restore-backup-minio-2", "reconcileID": "2faab9e6-d4c7-460f-a1d1-f83288f91f73", "pod": "some-name-rs1-2"} 2025-06-06T08:48:38.870Z DEBUG Pod is ready for physical restore {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-2","namespace":"pitr-physical-5149"}, "namespace": "pitr-physical-5149", "name": "restore-backup-minio-2", "reconcileID": "2faab9e6-d4c7-460f-a1d1-f83288f91f73", "pod": "some-name-rs1-0"} 2025-06-06T08:48:38.871Z DEBUG Pod is ready for physical restore {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-2","namespace":"pitr-physical-5149"}, "namespace": "pitr-physical-5149", "name": "restore-backup-minio-2", "reconcileID": "2faab9e6-d4c7-460f-a1d1-f83288f91f73", "pod": "some-name-rs1-1"} 2025-06-06T08:48:38.871Z DEBUG Statefulset is ready for physical restore {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-2","namespace":"pitr-physical-5149"}, "namespace": "pitr-physical-5149", "name": "restore-backup-minio-2", "reconcileID": "2faab9e6-d4c7-460f-a1d1-f83288f91f73", "sts": "some-name-rs1", "replset": "rs1"} 2025-06-06T08:48:38.871Z DEBUG Pod is ready for physical restore {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-2","namespace":"pitr-physical-5149"}, "namespace": "pitr-physical-5149", "name": "restore-backup-minio-2", "reconcileID": "2faab9e6-d4c7-460f-a1d1-f83288f91f73", "pod": "some-name-rs2-2"} 2025-06-06T08:48:38.871Z DEBUG Pod is ready for physical restore {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": 
{"name":"restore-backup-minio-2","namespace":"pitr-physical-5149"}, "namespace": "pitr-physical-5149", "name": "restore-backup-minio-2", "reconcileID": "2faab9e6-d4c7-460f-a1d1-f83288f91f73", "pod": "some-name-rs2-0"} 2025-06-06T08:48:38.871Z DEBUG Pod is ready for physical restore {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-2","namespace":"pitr-physical-5149"}, "namespace": "pitr-physical-5149", "name": "restore-backup-minio-2", "reconcileID": "2faab9e6-d4c7-460f-a1d1-f83288f91f73", "pod": "some-name-rs2-1"} 2025-06-06T08:48:38.871Z DEBUG Statefulset is ready for physical restore {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-2","namespace":"pitr-physical-5149"}, "namespace": "pitr-physical-5149", "name": "restore-backup-minio-2", "reconcileID": "2faab9e6-d4c7-460f-a1d1-f83288f91f73", "sts": "some-name-rs2", "replset": "rs2"} 2025-06-06T08:48:38.871Z DEBUG Statefulset is ready for physical restore {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-2","namespace":"pitr-physical-5149"}, "namespace": "pitr-physical-5149", "name": "restore-backup-minio-2", "reconcileID": "2faab9e6-d4c7-460f-a1d1-f83288f91f73", "sts": "some-name-cfg", "replset": "cfg"} 2025-06-06T08:48:38.871Z DEBUG Check restore status {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-2","namespace":"pitr-physical-5149"}, "namespace": "pitr-physical-5149", "name": "restore-backup-minio-2", "reconcileID": "2faab9e6-d4c7-460f-a1d1-f83288f91f73", "command": ["/opt/percona/pbm", "describe-restore", "2025-06-06T08:46:49.801096496Z", "--config", "/etc/pbm/pbm_config.yaml", "--out", "json"], "pod": "some-name-rs0-0"} 2025-06-06T08:48:39.182Z DEBUG PBM restore status {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-2","namespace":"pitr-physical-5149"}, "namespace": "pitr-physical-5149", "name": "restore-backup-minio-2", "reconcileID": "2faab9e6-d4c7-460f-a1d1-f83288f91f73", "status": 
{"type":"physical","opid":"","name":"2025-06-06T08:46:49.801096496Z","replsets":[{"name":"cfg","start_ts":0,"status":"running","last_transition_ts":1749199630,"first_write_ts":{"T":0,"I":0},"last_write_ts":{"T":0,"I":0},"node":"","conditions":null},{"name":"rs0","start_ts":0,"status":"done","last_transition_ts":1749199703,"first_write_ts":{"T":0,"I":0},"last_write_ts":{"T":0,"I":0},"node":"","conditions":null},{"name":"rs1","start_ts":0,"status":"done","last_transition_ts":1749199709,"first_write_ts":{"T":0,"I":0},"last_write_ts":{"T":0,"I":0},"node":"","conditions":null},{"name":"rs2","start_ts":0,"status":"done","last_transition_ts":1749199709,"first_write_ts":{"T":0,"I":0},"last_write_ts":{"T":0,"I":0},"node":"","conditions":null}],"compression":"","store":{"type":""},"size":0,"mongodb_version":"","fcv":"","start_ts":0,"last_transition_ts":1749199635,"first_write_ts":{"T":0,"I":0},"last_write_ts":{"T":0,"I":0},"hb":{"T":0,"I":0},"status":"running","conditions":null,"n":null,"pbm_version":"","balancer":""}} 2025-06-06T08:48:44.183Z DEBUG checking if restore is allowed {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-2","namespace":"pitr-physical-5149"}, "namespace": "pitr-physical-5149", "name": "restore-backup-minio-2", "reconcileID": "f8ee290f-2c20-4d15-8286-6f3ae9494de7", "cluster": "some-name", "namespace": "pitr-physical-5149"} 2025-06-06T08:48:44.184Z DEBUG Pod is ready for physical restore {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-2","namespace":"pitr-physical-5149"}, "namespace": "pitr-physical-5149", "name": "restore-backup-minio-2", "reconcileID": "f8ee290f-2c20-4d15-8286-6f3ae9494de7", "pod": "some-name-rs0-0"} 2025-06-06T08:48:44.184Z DEBUG Pod is ready for physical restore {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-2","namespace":"pitr-physical-5149"}, "namespace": "pitr-physical-5149", "name": "restore-backup-minio-2", "reconcileID": "f8ee290f-2c20-4d15-8286-6f3ae9494de7", "pod": "some-name-rs0-2"} 2025-06-06T08:48:44.184Z DEBUG Pod is ready for physical restore {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-2","namespace":"pitr-physical-5149"}, "namespace": "pitr-physical-5149", "name": "restore-backup-minio-2", "reconcileID": "f8ee290f-2c20-4d15-8286-6f3ae9494de7", "pod": "some-name-rs0-1"} 2025-06-06T08:48:44.184Z DEBUG Statefulset is ready for physical restore {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-2","namespace":"pitr-physical-5149"}, "namespace": "pitr-physical-5149", "name": "restore-backup-minio-2", "reconcileID": "f8ee290f-2c20-4d15-8286-6f3ae9494de7", "sts": "some-name-rs0", "replset": "rs0"} 2025-06-06T08:48:44.185Z DEBUG Pod is ready for physical restore {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": 
{"name":"restore-backup-minio-2","namespace":"pitr-physical-5149"}, "namespace": "pitr-physical-5149", "name": "restore-backup-minio-2", "reconcileID": "f8ee290f-2c20-4d15-8286-6f3ae9494de7", "pod": "some-name-rs1-0"} 2025-06-06T08:48:44.185Z DEBUG Pod is ready for physical restore {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-2","namespace":"pitr-physical-5149"}, "namespace": "pitr-physical-5149", "name": "restore-backup-minio-2", "reconcileID": "f8ee290f-2c20-4d15-8286-6f3ae9494de7", "pod": "some-name-rs1-1"} 2025-06-06T08:48:44.185Z DEBUG Pod is ready for physical restore {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-2","namespace":"pitr-physical-5149"}, "namespace": "pitr-physical-5149", "name": "restore-backup-minio-2", "reconcileID": "f8ee290f-2c20-4d15-8286-6f3ae9494de7", "pod": "some-name-rs1-2"} 2025-06-06T08:48:44.185Z DEBUG Statefulset is ready for physical restore {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-2","namespace":"pitr-physical-5149"}, "namespace": "pitr-physical-5149", "name": "restore-backup-minio-2", "reconcileID": "f8ee290f-2c20-4d15-8286-6f3ae9494de7", "sts": "some-name-rs1", "replset": "rs1"} 2025-06-06T08:48:44.185Z DEBUG Pod is ready for physical restore {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-2","namespace":"pitr-physical-5149"}, "namespace": "pitr-physical-5149", "name": "restore-backup-minio-2", "reconcileID": "f8ee290f-2c20-4d15-8286-6f3ae9494de7", "pod": "some-name-rs2-2"} 2025-06-06T08:48:44.185Z DEBUG Pod is ready for physical restore {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-2","namespace":"pitr-physical-5149"}, "namespace": "pitr-physical-5149", "name": "restore-backup-minio-2", "reconcileID": "f8ee290f-2c20-4d15-8286-6f3ae9494de7", "pod": "some-name-rs2-0"} 2025-06-06T08:48:44.185Z DEBUG Pod is ready for physical restore {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-2","namespace":"pitr-physical-5149"}, "namespace": "pitr-physical-5149", "name": "restore-backup-minio-2", "reconcileID": "f8ee290f-2c20-4d15-8286-6f3ae9494de7", "pod": "some-name-rs2-1"} 2025-06-06T08:48:44.185Z DEBUG Statefulset is ready for physical restore {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-2","namespace":"pitr-physical-5149"}, "namespace": "pitr-physical-5149", "name": "restore-backup-minio-2", "reconcileID": "f8ee290f-2c20-4d15-8286-6f3ae9494de7", "sts": "some-name-rs2", "replset": "rs2"} 2025-06-06T08:48:44.185Z DEBUG Statefulset is ready for physical restore {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": 
"PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-2","namespace":"pitr-physical-5149"}, "namespace": "pitr-physical-5149", "name": "restore-backup-minio-2", "reconcileID": "f8ee290f-2c20-4d15-8286-6f3ae9494de7", "sts": "some-name-cfg", "replset": "cfg"} 2025-06-06T08:48:44.185Z DEBUG Check restore status {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-2","namespace":"pitr-physical-5149"}, "namespace": "pitr-physical-5149", "name": "restore-backup-minio-2", "reconcileID": "f8ee290f-2c20-4d15-8286-6f3ae9494de7", "command": ["/opt/percona/pbm", "describe-restore", "2025-06-06T08:46:49.801096496Z", "--config", "/etc/pbm/pbm_config.yaml", "--out", "json"], "pod": "some-name-rs0-0"} 2025-06-06T08:48:44.489Z DEBUG PBM restore status {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-2","namespace":"pitr-physical-5149"}, "namespace": "pitr-physical-5149", "name": "restore-backup-minio-2", "reconcileID": "f8ee290f-2c20-4d15-8286-6f3ae9494de7", "status": {"type":"physical","opid":"","name":"2025-06-06T08:46:49.801096496Z","replsets":[{"name":"rs0","start_ts":0,"status":"done","last_transition_ts":1749199703,"first_write_ts":{"T":0,"I":0},"last_write_ts":{"T":0,"I":0},"node":"","conditions":null},{"name":"rs1","start_ts":0,"status":"done","last_transition_ts":1749199709,"first_write_ts":{"T":0,"I":0},"last_write_ts":{"T":0,"I":0},"node":"","conditions":null},{"name":"rs2","start_ts":0,"status":"done","last_transition_ts":1749199709,"first_write_ts":{"T":0,"I":0},"last_write_ts":{"T":0,"I":0},"node":"","conditions":null},{"name":"cfg","start_ts":0,"status":"running","last_transition_ts":1749199630,"first_write_ts":{"T":0,"I":0},"last_write_ts":{"T":0,"I":0},"node":"","conditions":null}],"compression":"","store":{"type":""},"size":0,"mongodb_version":"","fcv":"","start_ts":0,"last_transition_ts":1749199635,"first_write_ts":{"T":0,"I":0},"last_write_ts":{"T":0,"I":0},"hb":{"T":0,"I":0},"status":"running","conditions":null,"n":null,"pbm_version":"","balancer":""}} 2025-06-06T08:48:49.489Z DEBUG checking if restore is allowed {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-2","namespace":"pitr-physical-5149"}, "namespace": "pitr-physical-5149", "name": "restore-backup-minio-2", "reconcileID": "cdc43ec5-9e71-4ca0-899e-e94ea3ec5578", "cluster": "some-name", "namespace": "pitr-physical-5149"} 2025-06-06T08:48:49.491Z DEBUG Pod is ready for physical restore {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-2","namespace":"pitr-physical-5149"}, "namespace": "pitr-physical-5149", "name": "restore-backup-minio-2", "reconcileID": "cdc43ec5-9e71-4ca0-899e-e94ea3ec5578", "pod": "some-name-rs0-0"} 2025-06-06T08:48:49.491Z DEBUG Pod is ready for physical restore {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-2","namespace":"pitr-physical-5149"}, "namespace": 
"pitr-physical-5149", "name": "restore-backup-minio-2", "reconcileID": "cdc43ec5-9e71-4ca0-899e-e94ea3ec5578", "pod": "some-name-rs0-2"} 2025-06-06T08:48:49.491Z DEBUG Pod is ready for physical restore {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-2","namespace":"pitr-physical-5149"}, "namespace": "pitr-physical-5149", "name": "restore-backup-minio-2", "reconcileID": "cdc43ec5-9e71-4ca0-899e-e94ea3ec5578", "pod": "some-name-rs0-1"} 2025-06-06T08:48:49.491Z DEBUG Statefulset is ready for physical restore {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-2","namespace":"pitr-physical-5149"}, "namespace": "pitr-physical-5149", "name": "restore-backup-minio-2", "reconcileID": "cdc43ec5-9e71-4ca0-899e-e94ea3ec5578", "sts": "some-name-rs0", "replset": "rs0"} 2025-06-06T08:48:49.491Z DEBUG Pod is ready for physical restore {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-2","namespace":"pitr-physical-5149"}, "namespace": "pitr-physical-5149", "name": "restore-backup-minio-2", "reconcileID": "cdc43ec5-9e71-4ca0-899e-e94ea3ec5578", "pod": "some-name-rs1-2"} 2025-06-06T08:48:49.491Z DEBUG Pod is ready for physical restore {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-2","namespace":"pitr-physical-5149"}, "namespace": "pitr-physical-5149", "name": "restore-backup-minio-2", "reconcileID": "cdc43ec5-9e71-4ca0-899e-e94ea3ec5578", "pod": "some-name-rs1-0"} 2025-06-06T08:48:49.491Z DEBUG Pod is ready for physical restore {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-2","namespace":"pitr-physical-5149"}, "namespace": "pitr-physical-5149", "name": "restore-backup-minio-2", "reconcileID": "cdc43ec5-9e71-4ca0-899e-e94ea3ec5578", "pod": "some-name-rs1-1"} 2025-06-06T08:48:49.491Z DEBUG Statefulset is ready for physical restore {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-2","namespace":"pitr-physical-5149"}, "namespace": "pitr-physical-5149", "name": "restore-backup-minio-2", "reconcileID": "cdc43ec5-9e71-4ca0-899e-e94ea3ec5578", "sts": "some-name-rs1", "replset": "rs1"} 2025-06-06T08:48:49.491Z DEBUG Pod is ready for physical restore {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-2","namespace":"pitr-physical-5149"}, "namespace": "pitr-physical-5149", "name": "restore-backup-minio-2", "reconcileID": "cdc43ec5-9e71-4ca0-899e-e94ea3ec5578", "pod": "some-name-rs2-2"} 2025-06-06T08:48:49.491Z DEBUG Pod is ready for physical restore {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": 
{"name":"restore-backup-minio-2","namespace":"pitr-physical-5149"}, "namespace": "pitr-physical-5149", "name": "restore-backup-minio-2", "reconcileID": "cdc43ec5-9e71-4ca0-899e-e94ea3ec5578", "pod": "some-name-rs2-0"} 2025-06-06T08:48:49.491Z DEBUG Pod is ready for physical restore {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-2","namespace":"pitr-physical-5149"}, "namespace": "pitr-physical-5149", "name": "restore-backup-minio-2", "reconcileID": "cdc43ec5-9e71-4ca0-899e-e94ea3ec5578", "pod": "some-name-rs2-1"} 2025-06-06T08:48:49.491Z DEBUG Statefulset is ready for physical restore {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-2","namespace":"pitr-physical-5149"}, "namespace": "pitr-physical-5149", "name": "restore-backup-minio-2", "reconcileID": "cdc43ec5-9e71-4ca0-899e-e94ea3ec5578", "sts": "some-name-rs2", "replset": "rs2"} 2025-06-06T08:48:49.491Z DEBUG Statefulset is ready for physical restore {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-2","namespace":"pitr-physical-5149"}, "namespace": "pitr-physical-5149", "name": "restore-backup-minio-2", "reconcileID": "cdc43ec5-9e71-4ca0-899e-e94ea3ec5578", "sts": "some-name-cfg", "replset": "cfg"} 2025-06-06T08:48:49.491Z DEBUG Check restore status {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-2","namespace":"pitr-physical-5149"}, "namespace": "pitr-physical-5149", "name": "restore-backup-minio-2", "reconcileID": "cdc43ec5-9e71-4ca0-899e-e94ea3ec5578", "command": ["/opt/percona/pbm", "describe-restore", "2025-06-06T08:46:49.801096496Z", "--config", "/etc/pbm/pbm_config.yaml", "--out", "json"], "pod": "some-name-rs0-0"} 2025-06-06T08:48:50.194Z DEBUG PBM restore status {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-2","namespace":"pitr-physical-5149"}, "namespace": "pitr-physical-5149", "name": "restore-backup-minio-2", "reconcileID": "cdc43ec5-9e71-4ca0-899e-e94ea3ec5578", "status": 
{"type":"physical","opid":"","name":"2025-06-06T08:46:49.801096496Z","replsets":[{"name":"cfg","start_ts":0,"status":"down","last_transition_ts":1749199727,"first_write_ts":{"T":0,"I":0},"last_write_ts":{"T":0,"I":0},"node":"","conditions":null},{"name":"rs0","start_ts":0,"status":"done","last_transition_ts":1749199703,"first_write_ts":{"T":0,"I":0},"last_write_ts":{"T":0,"I":0},"node":"","conditions":null},{"name":"rs1","start_ts":0,"status":"done","last_transition_ts":1749199709,"first_write_ts":{"T":0,"I":0},"last_write_ts":{"T":0,"I":0},"node":"","conditions":null},{"name":"rs2","start_ts":0,"status":"done","last_transition_ts":1749199709,"first_write_ts":{"T":0,"I":0},"last_write_ts":{"T":0,"I":0},"node":"","conditions":null}],"compression":"","store":{"type":""},"size":0,"mongodb_version":"","fcv":"","start_ts":0,"last_transition_ts":1749199635,"first_write_ts":{"T":0,"I":0},"last_write_ts":{"T":0,"I":0},"hb":{"T":0,"I":0},"status":"running","conditions":null,"n":null,"pbm_version":"","balancer":""}} 2025-06-06T08:48:55.195Z DEBUG checking if restore is allowed {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-2","namespace":"pitr-physical-5149"}, "namespace": "pitr-physical-5149", "name": "restore-backup-minio-2", "reconcileID": "d48928c8-d328-4efb-98e0-6f0c75f7bd2b", "cluster": "some-name", "namespace": "pitr-physical-5149"} 2025-06-06T08:48:55.197Z DEBUG Pod is ready for physical restore {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-2","namespace":"pitr-physical-5149"}, "namespace": "pitr-physical-5149", "name": "restore-backup-minio-2", "reconcileID": "d48928c8-d328-4efb-98e0-6f0c75f7bd2b", "pod": "some-name-rs0-1"} 2025-06-06T08:48:55.197Z DEBUG Pod is ready for physical restore {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-2","namespace":"pitr-physical-5149"}, "namespace": "pitr-physical-5149", "name": "restore-backup-minio-2", "reconcileID": "d48928c8-d328-4efb-98e0-6f0c75f7bd2b", "pod": "some-name-rs0-0"} 2025-06-06T08:48:55.197Z DEBUG Pod is ready for physical restore {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-2","namespace":"pitr-physical-5149"}, "namespace": "pitr-physical-5149", "name": "restore-backup-minio-2", "reconcileID": "d48928c8-d328-4efb-98e0-6f0c75f7bd2b", "pod": "some-name-rs0-2"} 2025-06-06T08:48:55.197Z DEBUG Statefulset is ready for physical restore {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-2","namespace":"pitr-physical-5149"}, "namespace": "pitr-physical-5149", "name": "restore-backup-minio-2", "reconcileID": "d48928c8-d328-4efb-98e0-6f0c75f7bd2b", "sts": "some-name-rs0", "replset": "rs0"} 2025-06-06T08:48:55.198Z DEBUG Pod is ready for physical restore {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": 
{"name":"restore-backup-minio-2","namespace":"pitr-physical-5149"}, "namespace": "pitr-physical-5149", "name": "restore-backup-minio-2", "reconcileID": "d48928c8-d328-4efb-98e0-6f0c75f7bd2b", "pod": "some-name-rs1-1"} 2025-06-06T08:48:55.198Z DEBUG Pod is ready for physical restore {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-2","namespace":"pitr-physical-5149"}, "namespace": "pitr-physical-5149", "name": "restore-backup-minio-2", "reconcileID": "d48928c8-d328-4efb-98e0-6f0c75f7bd2b", "pod": "some-name-rs1-2"} 2025-06-06T08:48:55.198Z DEBUG Pod is ready for physical restore {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-2","namespace":"pitr-physical-5149"}, "namespace": "pitr-physical-5149", "name": "restore-backup-minio-2", "reconcileID": "d48928c8-d328-4efb-98e0-6f0c75f7bd2b", "pod": "some-name-rs1-0"} 2025-06-06T08:48:55.198Z DEBUG Statefulset is ready for physical restore {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-2","namespace":"pitr-physical-5149"}, "namespace": "pitr-physical-5149", "name": "restore-backup-minio-2", "reconcileID": "d48928c8-d328-4efb-98e0-6f0c75f7bd2b", "sts": "some-name-rs1", "replset": "rs1"} 2025-06-06T08:48:55.198Z DEBUG Pod is ready for physical restore {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-2","namespace":"pitr-physical-5149"}, "namespace": "pitr-physical-5149", "name": "restore-backup-minio-2", "reconcileID": "d48928c8-d328-4efb-98e0-6f0c75f7bd2b", "pod": "some-name-rs2-2"} 2025-06-06T08:48:55.198Z DEBUG Pod is ready for physical restore {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-2","namespace":"pitr-physical-5149"}, "namespace": "pitr-physical-5149", "name": "restore-backup-minio-2", "reconcileID": "d48928c8-d328-4efb-98e0-6f0c75f7bd2b", "pod": "some-name-rs2-0"} 2025-06-06T08:48:55.198Z DEBUG Pod is ready for physical restore {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-2","namespace":"pitr-physical-5149"}, "namespace": "pitr-physical-5149", "name": "restore-backup-minio-2", "reconcileID": "d48928c8-d328-4efb-98e0-6f0c75f7bd2b", "pod": "some-name-rs2-1"} 2025-06-06T08:48:55.198Z DEBUG Statefulset is ready for physical restore {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-2","namespace":"pitr-physical-5149"}, "namespace": "pitr-physical-5149", "name": "restore-backup-minio-2", "reconcileID": "d48928c8-d328-4efb-98e0-6f0c75f7bd2b", "sts": "some-name-rs2", "replset": "rs2"} 2025-06-06T08:48:55.198Z DEBUG Statefulset is ready for physical restore {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": 
"PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-2","namespace":"pitr-physical-5149"}, "namespace": "pitr-physical-5149", "name": "restore-backup-minio-2", "reconcileID": "d48928c8-d328-4efb-98e0-6f0c75f7bd2b", "sts": "some-name-cfg", "replset": "cfg"} 2025-06-06T08:48:55.198Z DEBUG Check restore status {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-2","namespace":"pitr-physical-5149"}, "namespace": "pitr-physical-5149", "name": "restore-backup-minio-2", "reconcileID": "d48928c8-d328-4efb-98e0-6f0c75f7bd2b", "command": ["/opt/percona/pbm", "describe-restore", "2025-06-06T08:46:49.801096496Z", "--config", "/etc/pbm/pbm_config.yaml", "--out", "json"], "pod": "some-name-rs0-0"} 2025-06-06T08:48:55.545Z DEBUG PBM restore status {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-2","namespace":"pitr-physical-5149"}, "namespace": "pitr-physical-5149", "name": "restore-backup-minio-2", "reconcileID": "d48928c8-d328-4efb-98e0-6f0c75f7bd2b", "status": {"type":"physical","opid":"","name":"2025-06-06T08:46:49.801096496Z","replsets":[{"name":"cfg","start_ts":0,"status":"down","last_transition_ts":1749199727,"first_write_ts":{"T":0,"I":0},"last_write_ts":{"T":0,"I":0},"node":"","conditions":null},{"name":"rs0","start_ts":0,"status":"done","last_transition_ts":1749199703,"first_write_ts":{"T":0,"I":0},"last_write_ts":{"T":0,"I":0},"node":"","conditions":null},{"name":"rs1","start_ts":0,"status":"done","last_transition_ts":1749199709,"first_write_ts":{"T":0,"I":0},"last_write_ts":{"T":0,"I":0},"node":"","conditions":null},{"name":"rs2","start_ts":0,"status":"done","last_transition_ts":1749199709,"first_write_ts":{"T":0,"I":0},"last_write_ts":{"T":0,"I":0},"node":"","conditions":null}],"compression":"","store":{"type":""},"size":0,"mongodb_version":"","fcv":"","start_ts":0,"last_transition_ts":1749199635,"first_write_ts":{"T":0,"I":0},"last_write_ts":{"T":0,"I":0},"hb":{"T":0,"I":0},"status":"running","conditions":null,"n":null,"pbm_version":"","balancer":""}} 2025-06-06T08:49:00.546Z DEBUG checking if restore is allowed {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-2","namespace":"pitr-physical-5149"}, "namespace": "pitr-physical-5149", "name": "restore-backup-minio-2", "reconcileID": "9aad1956-3cce-469a-891b-8d19e86cc33d", "cluster": "some-name", "namespace": "pitr-physical-5149"} 2025-06-06T08:49:00.548Z DEBUG Pod is ready for physical restore {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-2","namespace":"pitr-physical-5149"}, "namespace": "pitr-physical-5149", "name": "restore-backup-minio-2", "reconcileID": "9aad1956-3cce-469a-891b-8d19e86cc33d", "pod": "some-name-rs0-2"} 2025-06-06T08:49:00.548Z DEBUG Pod is ready for physical restore {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-2","namespace":"pitr-physical-5149"}, "namespace": 
"pitr-physical-5149", "name": "restore-backup-minio-2", "reconcileID": "9aad1956-3cce-469a-891b-8d19e86cc33d", "pod": "some-name-rs0-1"} 2025-06-06T08:49:00.548Z DEBUG Pod is ready for physical restore {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-2","namespace":"pitr-physical-5149"}, "namespace": "pitr-physical-5149", "name": "restore-backup-minio-2", "reconcileID": "9aad1956-3cce-469a-891b-8d19e86cc33d", "pod": "some-name-rs0-0"} 2025-06-06T08:49:00.548Z DEBUG Statefulset is ready for physical restore {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-2","namespace":"pitr-physical-5149"}, "namespace": "pitr-physical-5149", "name": "restore-backup-minio-2", "reconcileID": "9aad1956-3cce-469a-891b-8d19e86cc33d", "sts": "some-name-rs0", "replset": "rs0"} 2025-06-06T08:49:00.548Z DEBUG Pod is ready for physical restore {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-2","namespace":"pitr-physical-5149"}, "namespace": "pitr-physical-5149", "name": "restore-backup-minio-2", "reconcileID": "9aad1956-3cce-469a-891b-8d19e86cc33d", "pod": "some-name-rs1-0"} 2025-06-06T08:49:00.548Z DEBUG Pod is ready for physical restore {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-2","namespace":"pitr-physical-5149"}, "namespace": "pitr-physical-5149", "name": "restore-backup-minio-2", "reconcileID": "9aad1956-3cce-469a-891b-8d19e86cc33d", "pod": "some-name-rs1-1"} 2025-06-06T08:49:00.548Z DEBUG Pod is ready for physical restore {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-2","namespace":"pitr-physical-5149"}, "namespace": "pitr-physical-5149", "name": "restore-backup-minio-2", "reconcileID": "9aad1956-3cce-469a-891b-8d19e86cc33d", "pod": "some-name-rs1-2"} 2025-06-06T08:49:00.548Z DEBUG Statefulset is ready for physical restore {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-2","namespace":"pitr-physical-5149"}, "namespace": "pitr-physical-5149", "name": "restore-backup-minio-2", "reconcileID": "9aad1956-3cce-469a-891b-8d19e86cc33d", "sts": "some-name-rs1", "replset": "rs1"} 2025-06-06T08:49:00.548Z DEBUG Pod is ready for physical restore {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-2","namespace":"pitr-physical-5149"}, "namespace": "pitr-physical-5149", "name": "restore-backup-minio-2", "reconcileID": "9aad1956-3cce-469a-891b-8d19e86cc33d", "pod": "some-name-rs2-2"} 2025-06-06T08:49:00.548Z DEBUG Pod is ready for physical restore {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": 
{"name":"restore-backup-minio-2","namespace":"pitr-physical-5149"}, "namespace": "pitr-physical-5149", "name": "restore-backup-minio-2", "reconcileID": "9aad1956-3cce-469a-891b-8d19e86cc33d", "pod": "some-name-rs2-0"} 2025-06-06T08:49:00.548Z DEBUG Pod is ready for physical restore {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-2","namespace":"pitr-physical-5149"}, "namespace": "pitr-physical-5149", "name": "restore-backup-minio-2", "reconcileID": "9aad1956-3cce-469a-891b-8d19e86cc33d", "pod": "some-name-rs2-1"} 2025-06-06T08:49:00.548Z DEBUG Statefulset is ready for physical restore {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-2","namespace":"pitr-physical-5149"}, "namespace": "pitr-physical-5149", "name": "restore-backup-minio-2", "reconcileID": "9aad1956-3cce-469a-891b-8d19e86cc33d", "sts": "some-name-rs2", "replset": "rs2"} 2025-06-06T08:49:00.548Z DEBUG Statefulset is ready for physical restore {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-2","namespace":"pitr-physical-5149"}, "namespace": "pitr-physical-5149", "name": "restore-backup-minio-2", "reconcileID": "9aad1956-3cce-469a-891b-8d19e86cc33d", "sts": "some-name-cfg", "replset": "cfg"} 2025-06-06T08:49:00.548Z DEBUG Check restore status {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-2","namespace":"pitr-physical-5149"}, "namespace": "pitr-physical-5149", "name": "restore-backup-minio-2", "reconcileID": "9aad1956-3cce-469a-891b-8d19e86cc33d", "command": ["/opt/percona/pbm", "describe-restore", "2025-06-06T08:46:49.801096496Z", "--config", "/etc/pbm/pbm_config.yaml", "--out", "json"], "pod": "some-name-rs0-0"} 2025-06-06T08:49:00.901Z ERROR failed to make restore {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-2","namespace":"pitr-physical-5149"}, "namespace": "pitr-physical-5149", "name": "restore-backup-minio-2", "reconcileID": "9aad1956-3cce-469a-891b-8d19e86cc33d", "restore": "restore-backup-minio-2", "backup": "backup-minio-2", "error": "unmarshal PBM describe-restore output: unexpected end of JSON input", "errorVerbose": "unexpected end of JSON input\nunmarshal PBM describe-restore 
2025-06-06T08:49:00.901Z ERROR failed to make restore {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-2","namespace":"pitr-physical-5149"}, "namespace": "pitr-physical-5149", "name": "restore-backup-minio-2", "reconcileID": "9aad1956-3cce-469a-891b-8d19e86cc33d", "restore": "restore-backup-minio-2", "backup": "backup-minio-2", "error": "unmarshal PBM describe-restore output: unexpected end of JSON input", "errorVerbose": "unexpected end of JSON input\nunmarshal PBM describe-restore output\ngithub.com/percona/percona-server-mongodb-operator/pkg/controller/perconaservermongodbrestore.(*ReconcilePerconaServerMongoDBRestore).reconcilePhysicalRestore\n\t/go/src/github.com/percona/percona-server-mongodb-operator/pkg/controller/perconaservermongodbrestore/physical.go:225\ngithub.com/percona/percona-server-mongodb-operator/pkg/controller/perconaservermongodbrestore.(*ReconcilePerconaServerMongoDBRestore).Reconcile\n\t/go/src/github.com/percona/percona-server-mongodb-operator/pkg/controller/perconaservermongodbrestore/perconaservermongodbrestore_controller.go:250\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Reconcile\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.21.0/pkg/internal/controller/controller.go:119\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).reconcileHandler\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.21.0/pkg/internal/controller/controller.go:340\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).processNextWorkItem\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.21.0/pkg/internal/controller/controller.go:300\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Start.func2.1\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.21.0/pkg/internal/controller/controller.go:202\nruntime.goexit\n\t/usr/local/go/src/runtime/asm_amd64.s:1700"}
2025-06-06T08:49:00.901Z INFO Restore state changed {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-2","namespace":"pitr-physical-5149"}, "namespace": "pitr-physical-5149", "name": "restore-backup-minio-2", "reconcileID": "9aad1956-3cce-469a-891b-8d19e86cc33d", "previous": "running", "current": "error"}
2025-06-06T08:49:01.523Z INFO Warning: Reconciler returned both a non-zero result and a non-nil error. The result will always be ignored if the error is non-nil and the non-nil error causes requeuing with exponential backoff. For more details, see: https://pkg.go.dev/sigs.k8s.io/controller-runtime/pkg/reconcile#Reconciler {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-2","namespace":"pitr-physical-5149"}, "namespace": "pitr-physical-5149", "name": "restore-backup-minio-2", "reconcileID": "9aad1956-3cce-469a-891b-8d19e86cc33d"}
2025-06-06T08:49:01.523Z ERROR Reconciler error {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-minio-2","namespace":"pitr-physical-5149"}, "namespace": "pitr-physical-5149", "name": "restore-backup-minio-2", "reconcileID": "9aad1956-3cce-469a-891b-8d19e86cc33d", "error": "reconcile physical restore: unmarshal PBM describe-restore output: unexpected end of JSON input", "errorVerbose": "unexpected end of JSON input\nunmarshal PBM describe-restore output\ngithub.com/percona/percona-server-mongodb-operator/pkg/controller/perconaservermongodbrestore.(*ReconcilePerconaServerMongoDBRestore).reconcilePhysicalRestore\n\t/go/src/github.com/percona/percona-server-mongodb-operator/pkg/controller/perconaservermongodbrestore/physical.go:225\ngithub.com/percona/percona-server-mongodb-operator/pkg/controller/perconaservermongodbrestore.(*ReconcilePerconaServerMongoDBRestore).Reconcile\n\t/go/src/github.com/percona/percona-server-mongodb-operator/pkg/controller/perconaservermongodbrestore/perconaservermongodbrestore_controller.go:250\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Reconcile\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.21.0/pkg/internal/controller/controller.go:119\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).reconcileHandler\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.21.0/pkg/internal/controller/controller.go:340\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).processNextWorkItem\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.21.0/pkg/internal/controller/controller.go:300\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Start.func2.1\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.21.0/pkg/internal/controller/controller.go:202\nruntime.goexit\n\t/usr/local/go/src/runtime/asm_amd64.s:1700\nreconcile physical restore\ngithub.com/percona/percona-server-mongodb-operator/pkg/controller/perconaservermongodbrestore.(*ReconcilePerconaServerMongoDBRestore).Reconcile\n\t/go/src/github.com/percona/percona-server-mongodb-operator/pkg/controller/perconaservermongodbrestore/perconaservermongodbrestore_controller.go:252\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Reconcile\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.21.0/pkg/internal/controller/controller.go:119\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).reconcileHandler\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.21.0/pkg/internal/controller/controller.go:340\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).processNextWorkItem\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.21.0/pkg/internal/controller/controller.go:300\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Start.func2.1\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.21.0/pkg/internal/controller/controller.go:202\nruntime.goexit\n\t/usr/local/go/src/runtime/asm_amd64.s:1700"}
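The warning refers to the controller-runtime contract sketched below with a hypothetical reconciler (not the operator's code): when Reconcile returns a non-nil error, any non-zero Result is discarded and the request is requeued with exponential backoff instead.

```go
package main

import (
	"context"
	"time"

	ctrl "sigs.k8s.io/controller-runtime"
)

type reconciler struct{}

// checkRestore stands in for the operator's describe-restore poll;
// it is a placeholder for illustration only.
func (r *reconciler) checkRestore(ctx context.Context, req ctrl.Request) (string, error) {
	return "running", nil
}

// If Reconcile returns a non-nil error, controller-runtime ignores any
// non-zero Result and requeues with exponential backoff -- returning both,
// as below, triggers the warning seen in the log.
func (r *reconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
	status, err := r.checkRestore(ctx, req)
	if err != nil {
		return ctrl.Result{RequeueAfter: 5 * time.Second}, err // Result is discarded
	}
	if status == "running" {
		return ctrl.Result{RequeueAfter: 5 * time.Second}, nil // poll again in 5s
	}
	return ctrl.Result{}, nil
}

func main() {} // no-op: the snippet only illustrates the Reconcile contract
```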
+ kubectl_bin get psmdb-restore restore-backup-minio-2 -o yaml
++ mktemp
+ local LAST_OUT=/tmp/tmp.ERbu8fUVSz
++ mktemp
+ local LAST_ERR=/tmp/tmp.8DrVBHjha9
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl get psmdb-restore restore-backup-minio-2 -o yaml
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.ERbu8fUVSz
apiVersion: psmdb.percona.com/v1
kind: PerconaServerMongoDBRestore
metadata:
  annotations:
    kubectl.kubernetes.io/last-applied-configuration: |
      {"apiVersion":"psmdb.percona.com/v1","kind":"PerconaServerMongoDBRestore","metadata":{"annotations":{},"name":"restore-backup-minio-2","namespace":"pitr-physical-5149"},"spec":{"backupName":"backup-minio-2","clusterName":"some-name","pitr":{"date":"2025-06-06 08:32:39","type":"date"}}}
  creationTimestamp: "2025-06-06T08:35:16Z"
  generation: 1
  name: restore-backup-minio-2
  namespace: pitr-physical-5149
  resourceVersion: "1749199740915023023"
  uid: 92412b3f-278f-4e3e-a668-00b39dcb3e25
spec:
  backupName: backup-minio-2
  clusterName: some-name
  pitr:
    date: "2025-06-06 08:32:39"
    type: date
status:
  error: 'unmarshal PBM describe-restore output: unexpected end of JSON input'
  pbmName: "2025-06-06T08:46:49.801096496Z"
  pitrTarget: 2025-06-06T08:32:39
  state: error
+ cat /tmp/tmp.8DrVBHjha9
+ rm /tmp/tmp.ERbu8fUVSz /tmp/tmp.8DrVBHjha9
+ return 0
+ kubectl_bin describe psmdb-restore restore-backup-minio-2
++ mktemp
+ local LAST_OUT=/tmp/tmp.1gcHFen0BG
++ mktemp
+ local LAST_ERR=/tmp/tmp.wKlsDVdsXE
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl describe psmdb-restore restore-backup-minio-2
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.1gcHFen0BG
Name:         restore-backup-minio-2
Namespace:    pitr-physical-5149
Labels:
Annotations:
API Version:  psmdb.percona.com/v1
Kind:         PerconaServerMongoDBRestore
Metadata:
  Creation Timestamp:  2025-06-06T08:35:16Z
  Generation:          1
  Resource Version:    1749199740915023023
  UID:                 92412b3f-278f-4e3e-a668-00b39dcb3e25
Spec:
  Backup Name:   backup-minio-2
  Cluster Name:  some-name
  Pitr:
    Date:  2025-06-06 08:32:39
    Type:  date
Status:
  Error:        unmarshal PBM describe-restore output: unexpected end of JSON input
  Pbm Name:     2025-06-06T08:46:49.801096496Z
  Pitr Target:  2025-06-06T08:32:39
  State:        error
Events:
+ cat /tmp/tmp.wKlsDVdsXE
+ rm /tmp/tmp.1gcHFen0BG /tmp/tmp.wKlsDVdsXE
+ return 0
+ echo 'Restore object restore-backup-minio-2 is in error state after 3 minutes.'
Restore object restore-backup-minio-2 is in error state after 3 minutes.
+ echo something went wrong with operator or kubernetes cluster
something went wrong with operator or kubernetes cluster
+ exit 1
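The restore failed not because the data recovery went wrong (each shard reported "done") but because a single describe-restore poll returned empty output while the cfg nodes were restarting. One way a caller could tolerate that window, as a hypothetical sketch (the helper name, retry count, and delay are assumptions, not the operator's fix):

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"os/exec"
	"time"
)

// describeRestore shells out to pbm and retries while the command fails or
// produces no output, e.g. while mongod on the target node is restarting.
func describeRestore(name string) (map[string]interface{}, error) {
	var lastErr error
	for attempt := 1; attempt <= 5; attempt++ {
		out, err := exec.Command("/opt/percona/pbm", "describe-restore", name,
			"--config", "/etc/pbm/pbm_config.yaml", "--out", "json").Output()
		if err == nil && len(bytes.TrimSpace(out)) > 0 {
			var status map[string]interface{}
			if err := json.Unmarshal(out, &status); err != nil {
				return nil, fmt.Errorf("unmarshal describe-restore output: %w", err)
			}
			return status, nil
		}
		// Empty output would otherwise surface as
		// "unexpected end of JSON input"; treat it as transient instead.
		lastErr = fmt.Errorf("empty or failed describe-restore (attempt %d): %v", attempt, err)
		time.Sleep(5 * time.Second)
	}
	return nil, lastErr
}

func main() {
	status, err := describeRestore("2025-06-06T08:46:49.801096496Z")
	if err != nil {
		fmt.Println("giving up:", err)
		return
	}
	fmt.Println("restore status:", status["status"])
}
```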