Log: /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2152/e2e-tests/logs/pitr-physical-backup-source.log grep: warning: stray \ before - Warning: version difference between client (1.35) and server (1.31) exceeds the supported minor version skew of +/-1 Warning: version difference between client (1.35) and server (1.31) exceeds the supported minor version skew of +/-1 Warning: version difference between client (1.35) and server (1.31) exceeds the supported minor version skew of +/-1 + main + create_infra pitr-physical-backup-source-16105 + local ns=pitr-physical-backup-source-16105 + [[ 1 == 1 ]] + delete_crd + desc 'get and delete old CRDs and RBAC' + set +o xtrace ----------------------------------------------------------------------------------- get and delete old CRDs and RBAC ----------------------------------------------------------------------------------- + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2152/deploy/crd.yaml --ignore-not-found --wait=false ++ mktemp + local LAST_OUT=/tmp/tmp.bvfSsMsSbI ++ mktemp + local LAST_ERR=/tmp/tmp.Om1WTWL5M3 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2152/deploy/crd.yaml --ignore-not-found --wait=false + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.bvfSsMsSbI + cat /tmp/tmp.Om1WTWL5M3 + rm /tmp/tmp.bvfSsMsSbI /tmp/tmp.Om1WTWL5M3 + return 0 ++ yq eval .metadata.name /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2152/deploy/crd.yaml ++ grep -v '\-\-\-' grep: warning: stray \ before - grep: warning: stray \ before - + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbbackups.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' + kubectl get perconaservermongodbbackups.psmdb.percona.com --all-namespaces -o wide error: the server doesn't have a resource type "perconaservermongodbbackups" + kubectl patch perconaservermongodbbackups.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbbackups" + : + kubectl_bin wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.WUSiC4mu2W ++ mktemp + local LAST_ERR=/tmp/tmp.lBGXx6fvxf + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.WUSiC4mu2W + cat /tmp/tmp.lBGXx6fvxf + rm /tmp/tmp.WUSiC4mu2W /tmp/tmp.lBGXx6fvxf + return 0 + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + kubectl get perconaservermongodbrestores.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbrestores.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbrestores" + kubectl patch perconaservermongodbrestores.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbrestores" + : + kubectl_bin wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.MWJIyWCx4T ++ mktemp + local 
LAST_ERR=/tmp/tmp.Vj4L5lNY8r + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.MWJIyWCx4T + cat /tmp/tmp.Vj4L5lNY8r + rm /tmp/tmp.MWJIyWCx4T /tmp/tmp.Vj4L5lNY8r + return 0 + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + kubectl get perconaservermongodbs.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -
customresourcedefinition.apiextensions.k8s.io/perconaservermongodbbackups.psmdb.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaservermongodbrestores.psmdb.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaservermongodbs.psmdb.percona.com se
error: the server
clusterrole.rbac.authorization.k8s.io/percona-server-mongodb-operator created serviceaccount/percona-server-mongodb-operator created clusterrolebinding.rbac.authorization.k8s.io/service-account-percona-server-mongodb-operator created deployment.apps/percona-server-mongodb-operator created waiting for pod/percona-server-mongodb-operator-78c57f9588-92ptf to be ready.OK Print operat
+ exit_status=1 +
2025-12-18T18:43:01.329Z INFO setup Manager starting up {"gitCommit": "4461031b25c11e5b29d77d465c058b936127aac2", "gitBranch": "PR-2152-4461031b", "buildTime": "", "goVersion": "go1.25.5", "os": "linux", "arch": "amd64"}
----------------------------------------------------------------------------------- destr
+ exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.5YQJNpYbkI + cat /tmp/tmp.MCGOYPcg1k error: timed out waiting for the condition on customresourcedefinitions/perconaservermongodbs.psmdb.percona.com + sleep 4 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbs.psmdb.percona.com + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.5YQJNpYbkI + cat /tmp/tmp.MCGOYPcg1k error: timed out waiting for the condition on customresourcedefinitions/perconaservermongodbs.psmdb.percona.com + sleep 8 + cat /tmp/tmp.5YQJNpYbkI + cat /tmp/tmp.MCGOYPcg1k error: timed out waiting for the condition on customresourcedefinitions/perconaservermongodbs.psmdb.percona.com + rm /tmp/tmp.5YQJNpYbkI /tmp/tmp.MCGOYPcg1k + return 1 + : + local rbac_yaml=rbac.yaml + '[' -n psmdb-operator ']' + rbac_yaml=cw-rbac.yaml
+ kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2152/deploy/cw-rbac.yaml --ignore-not-found ++ mktemp + local LAST_OUT=/tmp/tmp.KeG4x7piNh ++ mktemp + local LAST_ERR=/tmp/tmp.Og1oatWpHd + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2152/deploy/cw-rbac.yaml --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.KeG4x7piNh clusterrole.rbac.authorization.k8s.io "percona-server-mongodb-operator" deleted clusterrolebinding.rbac.authorization.k8s.io "service-account-percona-server-mongodb-operator" deleted + cat /tmp/tmp.Og1oatWpHd + rm /tmp/tmp.KeG4x7piNh /tmp/tmp.Og1oatWpHd + return 0
+ check_crd_for_deletion PR-2152-4461031b + local git_tag=PR-2152-4461031b ++ curl -s https://raw.githubusercontent.com/percona/percona-server-mongodb-o
NAME: minio-service LAST DEPLOYED: Thu Dec 18 18:44:46 2025 NAMESPACE: pitr-physical-backup-source-23861 STATUS: deployed REVISION: 1 TEST SUITE: None NOTES: MinIO can be accessed via port
9000 on the following DNS name from within your cluster: minio-service.pitr-physical-backup-source-23861.cluster.local To access MinIO from localhost, run the below commands: 1. export POD_NAME=$(kubectl get pods --namespace pitr-physical-backup-source-23861 -l "release=minio-service" -o jsonpath="{.items[0].metadata.name}") 2. kubectl port-forward $POD_NAME 9000 --namespace pitr-physical-backup-source-23861 Read more about port forwarding here: http://kubernetes.io/docs/user-guide/kubectl/kubectl_port-forward/ You can now access MinIO server on http://localhost:9000. Follow the below steps to connect to MinIO server with mc client: 1. Download the MinIO mc client - https://min.io/docs/minio/linux/reference/minio-mc.html#quickstart 2. export MC_HOST_minio-service-local=http://$(kubectl get secret --namespace pitr-physical-backup-source-23861 minio-service -o jsonpath="{.data.rootUser}" | base64 --decode):$(kubectl get secret --namespace pitr-physical-backup-source-23861 minio-service -o jsonpath="{.data.rootPassword}" | base64 --decode)@localhost:9000 3. mc ls minio-service-local error: error executing jsonpath "{.items[].metadata.name}": Error executing template: array index out of bounds: index 0, length 0. Printing more information for debugging the template: template was: {.items[].metadata.name} object given to jsonpath engine was: map[string]interface {}{"apiVersion":"v1", "items":[]interface {}{}, "kind":"List", "metadata":map[string]interface {}{"resourceVersion":""}} error: error executing jsonpath "{.items[].metadata.name}": Error executing template: array index out of bounds: index 0, length 0. Printing more information for debugging the template: template was: {.items[].metadata.name} object given to jsonpath engine was: map[string]interface {}{"apiVersion":"v1", "items":[]interface {}{}, "kind":"List", "metadata":map[string]interface {}{"resourceVersion":""}} error: error executing jsonpath "{.items[].metadata.name}": Error executing template: array index out of bounds: index 0, length 0. Printing more information for debugging the template: template was: {.items[].metadata.name} object given to jsonpath engine was: map[string]interface {}{"apiVersion":"v1", "items":[]interface {}{}, "kind":"List", "metadata":map[string]interface {}{"resourceVersion":""}} error: error executing jsonpath "{.items[].metadata.name}": Error executing template: array index out of bounds: index 0, length 0. 
Printing more information for debugging the template: template was: {.items[].metadata.name} object given to jsonpath engine was: map[string]interface {}{"apiVersion":"v1", "items":[]interface {}{}, "kind":"List", "metadata":map[string]interface {}{"resourceVersion":""}} + grep chaos-mesh ++ awk '{print $1}' ++ kubectl get crd ++ awk '{print $1}' ++ grep chaos-mesh.org + timeout 30 kubectl delete crd error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrolebinding ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrolebinding error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrole ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrole error: resource(s) were provided, but no name was specified + : + desc 'cleaned up all old namespaces' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up all old namespaces ----------------------------------------------------------------------------------- + kubectl_bin get ns + '[' -n '' ']' + desc 'cleaned up old namespaces psmdb-operator' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up old namespaces psmdb-operator ----------------------------------------------------------------------------------- + kubectl_bin delete namespace psmdb-operator --ignore-not-found + egrep -v '^kube-|^default|Terminating|psmdb-operator|openshift|^gke-|^gmp-|^NAME' ++ mktemp egrep: warning: egrep is obsolescent; using grep -E + awk '{print$1}' ++ mktemp + xargs kubectl delete ns + local LAST_OUT=/tmp/tmp.Er4i0jrSe0 ++ mktemp + local LAST_OUT=/tmp/tmp.FmTrK7WtF3 ++ mktemp + local LAST_ERR=/tmp/tmp.VS54VyIayw + local exit_status=0 + local timeout=4 ++ seq 0 2 + local LAST_ERR=/tmp/tmp.3QO53wWAt5 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete namespace psmdb-operator --ignore-not-found + for i in $(seq 0 2) + set +e + kubectl get ns + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Er4i0jrSe0 + cat /tmp/tmp.3QO53wWAt5 + rm /tmp/tmp.Er4i0jrSe0 /tmp/tmp.3QO53wWAt5 + return 0 namespace "pitr-physical-backup-source-23861" deleted + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.FmTrK7WtF3 namespace "psmdb-operator" deleted + cat /tmp/tmp.VS54VyIayw + rm /tmp/tmp.FmTrK7WtF3 /tmp/tmp.VS54VyIayw + return 0 + kubectl_bin wait --for=delete namespace psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.jFcpLajTVG ++ mktemp + local LAST_ERR=/tmp/tmp.NM3M0ch3dV + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete namespace psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.jFcpLajTVG + cat /tmp/tmp.NM3M0ch3dV + rm /tmp/tmp.jFcpLajTVG /tmp/tmp.NM3M0ch3dV + return 0 + desc 'create namespace psmdb-operator' + set +o xtrace ----------------------------------------------------------------------------------- create namespace psmdb-operator ----------------------------------------------------------------------------------- + kubectl_bin create namespace psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.p2n0RNn16o ++ mktemp + local LAST_ERR=/tmp/tmp.kjVcgvw48e + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl create namespace psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.p2n0RNn16o 
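The namespace cleanup above lists all namespaces, filters out system and operator namespaces with an egrep -v pattern, and feeds the remainder to xargs kubectl delete ns. The same pipeline in current grep syntax (grep -E instead of the obsolescent egrep the log warns about), with --no-run-if-empty added so an empty match does not trigger the "resource(s) were provided, but no name was specified" error seen later in the trace; this is a suggested variant, not what the suite runs:

kubectl get ns \
  | grep -Ev '^kube-|^default|Terminating|psmdb-operator|openshift|^gke-|^gmp-|^NAME' \
  | awk '{print $1}' \
  | xargs --no-run-if-empty kubectl delete ns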
namespace/psmdb-operator created + cat /tmp/tmp.kjVcgvw48e + rm /tmp/tmp.p2n0RNn16o /tmp/tmp.kjVcgvw48e + return 0 + set_kube_ctx psmdb-operator + local namespace=psmdb-operator ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.4AgzynY16i +++ mktemp ++ local LAST_ERR=/tmp/tmp.NmTpJRU6z1 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.4AgzynY16i ++ cat /tmp/tmp.NmTpJRU6z1 ++ rm /tmp/tmp.4AgzynY16i /tmp/tmp.NmTpJRU6z1 ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-2152-4461031b-2-cluster6 --namespace=psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.5LGAwmECIY ++ mktemp + local LAST_ERR=/tmp/tmp.KHdls673dT + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-2152-4461031b-2-cluster6 --namespace=psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.5LGAwmECIY Context "gke_cloud-dev-112233_us-central1-a_jen-psmdb-2152-4461031b-2-cluster6" modified. + cat /tmp/tmp.KHdls673dT + rm /tmp/tmp.5LGAwmECIY /tmp/tmp.KHdls673dT + return 0 + deploy_operator + desc 'start PSMDB operator: perconalab/percona-server-mongodb-operator:PR-2152-4461031b' + set +o xtrace ----------------------------------------------------------------------------------- start PSMDB operator: perconalab/percona-server-mongodb-operator:PR-2152-4461031b ----------------------------------------------------------------------------------- + local cr_file + '[' -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2152/e2e-tests/pitr-physical-backup-source/conf/crd.yaml ']' + cr_file=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2152/deploy/crd.yaml + kubectl_bin apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2152/deploy/crd.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.PdVzsWwBPe ++ mktemp + local LAST_ERR=/tmp/tmp.gwwsWGuWst + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2152/deploy/crd.yaml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.PdVzsWwBPe customresourcedefinition.apiextensions.k8s.io/perconaservermongodbbackups.psmdb.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaservermongodbrestores.psmdb.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaservermongodbs.psmdb.percona.com serverside-applied + cat /tmp/tmp.gwwsWGuWst + rm /tmp/tmp.PdVzsWwBPe /tmp/tmp.gwwsWGuWst + return 0 + '[' -n psmdb-operator ']' + apply_rbac cw-rbac + local operator_namespace=psmdb-operator + local rbac=cw-rbac + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2152/deploy/cw-rbac.yaml + sed -e 's^namespace: .*^namespace: psmdb-operator^' + kubectl_bin apply -n psmdb-operator -f - ++ mktemp + local LAST_OUT=/tmp/tmp.hEuvT79F2y ++ mktemp + local LAST_ERR=/tmp/tmp.c6AmXCnLPf + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -n psmdb-operator -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.hEuvT79F2y clusterrole.rbac.authorization.k8s.io/percona-server-mongodb-operator created serviceaccount/percona-server-mongodb-operator created 
clusterrolebinding.rbac.authorization.k8s.io/service-account-percona-server-mongodb-operator created + cat /tmp/tmp.c6AmXCnLPf + rm /tmp/tmp.hEuvT79F2y /tmp/tmp.c6AmXCnLPf + return 0 + yq eval ' (.spec.template.spec.containers[].image = "perconalab/percona-server-mongodb-operator:PR-2152-4461031b") | ((.. | select(.[] == "DISABLE_TELEMETRY")) |= .value="true") | ((.. | select(.[] == "LOG_LEVEL")) |= .value="DEBUG")' /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2152/deploy/cw-operator.yaml + kubectl_bin apply -n psmdb-operator -f - ++ mktemp + local LAST_OUT=/tmp/tmp.intWVgDeQZ ++ mktemp + local LAST_ERR=/tmp/tmp.5ZIAH4IQEn + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -n psmdb-operator -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.intWVgDeQZ deployment.apps/percona-server-mongodb-operator created + cat /tmp/tmp.5ZIAH4IQEn + rm /tmp/tmp.intWVgDeQZ /tmp/tmp.5ZIAH4IQEn + return 0 + sleep 20 ++ get_operator_pod ++ kubectl_bin get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.98627QTcaE +++ mktemp ++ local LAST_ERR=/tmp/tmp.5YcCsdJvjs ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.98627QTcaE ++ cat /tmp/tmp.5YcCsdJvjs ++ rm /tmp/tmp.98627QTcaE /tmp/tmp.5YcCsdJvjs ++ return 0 + wait_operator_pod percona-server-mongodb-operator-78c57f9588-9d5d5 + local pod=percona-server-mongodb-operator-78c57f9588-9d5d5 + set +o xtrace waiting for pod/percona-server-mongodb-operator-78c57f9588-9d5d5 to be ready.OK + echo 'Print operator info from log' Print operator info from log + grep 'Manager starting up' ++ get_operator_pod ++ kubectl_bin get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.58wgCkzfCe +++ mktemp ++ local LAST_ERR=/tmp/tmp.poJ6Rlpz2R ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.58wgCkzfCe ++ cat /tmp/tmp.poJ6Rlpz2R ++ rm /tmp/tmp.58wgCkzfCe /tmp/tmp.poJ6Rlpz2R ++ return 0 + kubectl_bin logs -n psmdb-operator percona-server-mongodb-operator-78c57f9588-9d5d5 ++ mktemp + local LAST_OUT=/tmp/tmp.gwAmUs65oA ++ mktemp + local LAST_ERR=/tmp/tmp.LZQ0u3qQKv + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl logs -n psmdb-operator percona-server-mongodb-operator-78c57f9588-9d5d5 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.gwAmUs65oA + cat /tmp/tmp.LZQ0u3qQKv + rm /tmp/tmp.gwAmUs65oA /tmp/tmp.LZQ0u3qQKv + return 0 2025-12-18T18:45:32.099Z INFO setup Manager starting up {"gitCommit": "4461031b25c11e5b29d77d465c058b936127aac2", "gitBranch": "PR-2152-4461031b", "buildTime": "", "goVersion": "go1.25.5", "os": "linux", "arch": "amd64"} + create_namespace pitr-physical-backup-source-16105 + local namespace=pitr-physical-backup-source-16105 + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + destroy_chaos_mesh ++ helm list --all-namespaces --filter chaos-mesh ++ 
tail -n1 ++ awk '-F ' '{print $2}' ++ sed s/NAMESPACE// + local chaos_mesh_ns= + desc 'destroy chaos-mesh' + set +o xtrace ----------------------------------------------------------------------------------- destroy chaos-mesh ----------------------------------------------------------------------------------- + '[' -n '' ']' ++ kubectl get MutatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete MutatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ grep validate-auth ++ kubectl get ValidatingWebhookConfiguration ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl api-resources ++ awk '{print $1}' ++ grep chaos-mesh ++ kubectl get crd ++ grep chaos-mesh.org ++ awk '{print $1}' + timeout 30 kubectl delete crd error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrolebinding ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrolebinding error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrole ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrole error: resource(s) were provided, but no name was specified + : + desc 'cleaned up all old namespaces' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up all old namespaces ----------------------------------------------------------------------------------- + kubectl_bin get ns + egrep -v '^kube-|^default|Terminating|psmdb-operator|openshift|^gke-|^gmp-|^NAME' + awk '{print$1}' ++ mktemp + local LAST_OUT=/tmp/tmp.RxHJEbuIU8 egrep: warning: egrep is obsolescent; using grep -E ++ mktemp + '[' -n '' ']' + desc 'cleaned up old namespaces pitr-physical-backup-source-16105' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up old namespaces pitr-physical-backup-source-16105 ----------------------------------------------------------------------------------- + kubectl_bin delete namespace pitr-physical-backup-source-16105 --ignore-not-found + xargs kubectl delete ns + local LAST_ERR=/tmp/tmp.pbFMidF7p7 + local exit_status=0 + local timeout=4 ++ seq 0 2 ++ mktemp + for i in $(seq 0 2) + set +e + kubectl get ns + local LAST_OUT=/tmp/tmp.R3pVq1B4nM ++ mktemp + local LAST_ERR=/tmp/tmp.wXGHeWzhMu + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete namespace pitr-physical-backup-source-16105 --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.RxHJEbuIU8 + cat /tmp/tmp.pbFMidF7p7 + rm /tmp/tmp.RxHJEbuIU8 /tmp/tmp.pbFMidF7p7 + return 0 error: resource(s) were provided, but no name was specified + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.R3pVq1B4nM + cat /tmp/tmp.wXGHeWzhMu + rm /tmp/tmp.R3pVq1B4nM /tmp/tmp.wXGHeWzhMu + return 0 + kubectl_bin wait --for=delete namespace pitr-physical-backup-source-16105 ++ mktemp + local LAST_OUT=/tmp/tmp.EsyNib2GJx ++ mktemp + local LAST_ERR=/tmp/tmp.PpZs2ZIUq5 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete namespace 
pitr-physical-backup-source-16105 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.EsyNib2GJx + cat /tmp/tmp.PpZs2ZIUq5 + rm /tmp/tmp.EsyNib2GJx /tmp/tmp.PpZs2ZIUq5 + return 0 + desc 'create namespace pitr-physical-backup-source-16105' + set +o xtrace ----------------------------------------------------------------------------------- create namespace pitr-physical-backup-source-16105 ----------------------------------------------------------------------------------- + kubectl_bin create namespace pitr-physical-backup-source-16105 ++ mktemp + local LAST_OUT=/tmp/tmp.7tYPoSaRxU ++ mktemp + local LAST_ERR=/tmp/tmp.xHtctGXsPK + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl create namespace pitr-physical-backup-source-16105 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.7tYPoSaRxU namespace/pitr-physical-backup-source-16105 created + cat /tmp/tmp.xHtctGXsPK + rm /tmp/tmp.7tYPoSaRxU /tmp/tmp.xHtctGXsPK + return 0 + set_kube_ctx pitr-physical-backup-source-16105 + local namespace=pitr-physical-backup-source-16105 ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.7G9qofwXH2 +++ mktemp ++ local LAST_ERR=/tmp/tmp.z3A5MIn8tS ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.7G9qofwXH2 ++ cat /tmp/tmp.z3A5MIn8tS ++ rm /tmp/tmp.7G9qofwXH2 /tmp/tmp.z3A5MIn8tS ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-2152-4461031b-2-cluster6 --namespace=pitr-physical-backup-source-16105 ++ mktemp + local LAST_OUT=/tmp/tmp.lltGiJekoN ++ mktemp + local LAST_ERR=/tmp/tmp.qK1gUUEjSy + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-2152-4461031b-2-cluster6 --namespace=pitr-physical-backup-source-16105 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.lltGiJekoN Context "gke_cloud-dev-112233_us-central1-a_jen-psmdb-2152-4461031b-2-cluster6" modified. 
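Every kubectl invocation in this trace goes through the suite's kubectl_bin wrapper: two mktemp files capture stdout and stderr, the call is retried up to three times (seq 0 2) under set +e, and the captured output is cat'ed and removed before the last exit status is returned. A minimal sketch of that pattern, reconstructed from the trace alone (the names LAST_OUT, LAST_ERR, exit_status and timeout appear above; the backoff arithmetic and the exact body are assumptions, not the suite's actual definition):

kubectl_bin() {
    local LAST_OUT LAST_ERR exit_status=0 timeout=4
    LAST_OUT=$(mktemp)    # captured stdout, echoed back after the loop
    LAST_ERR=$(mktemp)    # captured stderr
    for i in $(seq 0 2); do
        set +e
        kubectl "$@" >"$LAST_OUT" 2>"$LAST_ERR"
        exit_status=$?
        set -e
        if [ "$exit_status" -eq 0 ]; then
            break
        fi
        sleep $((timeout * (i + 1)))    # assumption: matches the 'sleep 4' / 'sleep 8' seen after failed attempts
    done
    cat "$LAST_OUT"
    cat "$LAST_ERR" >&2
    rm "$LAST_OUT" "$LAST_ERR"
    return "$exit_status"
}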
+ cat /tmp/tmp.qK1gUUEjSy + rm /tmp/tmp.lltGiJekoN /tmp/tmp.qK1gUUEjSy + return 0 + deploy_minio + desc 'install Minio' + set +o xtrace ----------------------------------------------------------------------------------- install Minio ----------------------------------------------------------------------------------- + helm uninstall minio-service Error: uninstall: Release not loaded: minio-service: release: not found + : + helm repo remove minio "minio" has been removed from your repositories + helm repo add minio https://charts.min.io/ "minio" has been added to your repositories + retry 10 60 helm install minio-service --version 5.4.0 --set replicas=1 --set mode=standalone --set resources.requests.memory=256Mi --set rootUser=rootuser --set rootPassword=rootpass123 --set 'users[0].accessKey=some-access-key' --set 'users[0].secretKey=some-secret-key' --set 'users[0].policy=consoleAdmin' --set service.type=ClusterIP --set configPathmc=/tmp/.minio/ --set persistence.size=2G --set securityContext.enabled=false minio/minio + local max=10 + local delay=60 + shift 2 + local n=1 + helm install minio-service --version 5.4.0 --set replicas=1 --set mode=standalone --set resources.requests.memory=256Mi --set rootUser=rootuser --set rootPassword=rootpass123 --set 'users[0].accessKey=some-access-key' --set 'users[0].secretKey=some-secret-key' --set 'users[0].policy=consoleAdmin' --set service.type=ClusterIP --set configPathmc=/tmp/.minio/ --set persistence.size=2G --set securityContext.enabled=false minio/minio NAME: minio-service LAST DEPLOYED: Thu Dec 18 18:46:14 2025 NAMESPACE: pitr-physical-backup-source-16105 STATUS: deployed REVISION: 1 TEST SUITE: None NOTES: MinIO can be accessed via port 9000 on the following DNS name from within your cluster: minio-service.pitr-physical-backup-source-16105.cluster.local To access MinIO from localhost, run the below commands: 1. export POD_NAME=$(kubectl get pods --namespace pitr-physical-backup-source-16105 -l "release=minio-service" -o jsonpath="{.items[0].metadata.name}") 2. kubectl port-forward $POD_NAME 9000 --namespace pitr-physical-backup-source-16105 Read more about port forwarding here: http://kubernetes.io/docs/user-guide/kubectl/kubectl_port-forward/ You can now access MinIO server on http://localhost:9000. Follow the below steps to connect to MinIO server with mc client: 1. Download the MinIO mc client - https://min.io/docs/minio/linux/reference/minio-mc.html#quickstart 2. export MC_HOST_minio-service-local=http://$(kubectl get secret --namespace pitr-physical-backup-source-16105 minio-service -o jsonpath="{.data.rootUser}" | base64 --decode):$(kubectl get secret --namespace pitr-physical-backup-source-16105 minio-service -o jsonpath="{.data.rootPassword}" | base64 --decode)@localhost:9000 3. 
mc ls minio-service-local ++ kubectl_bin get pods --selector=release=minio-service -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.sVLvPgHVOt +++ mktemp ++ local LAST_ERR=/tmp/tmp.u2qeObdodl ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=release=minio-service -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.sVLvPgHVOt ++ cat /tmp/tmp.u2qeObdodl ++ rm /tmp/tmp.sVLvPgHVOt /tmp/tmp.u2qeObdodl ++ return 0 + MINIO_POD=minio-service-d9589b474-gtmkz + wait_pod minio-service-d9589b474-gtmkz + local pod=minio-service-d9589b474-gtmkz + set +o xtrace waiting for pod/minio-service-d9589b474-gtmkz to be ready.OK + '[' -n psmdb-operator ']' + kubectl_bin create svc -n psmdb-operator externalname minio-service --external-name=minio-service.pitr-physical-backup-source-16105.svc.cluster.local --tcp=9000 ++ mktemp + local LAST_OUT=/tmp/tmp.CkKd37ulWQ ++ mktemp + local LAST_ERR=/tmp/tmp.yq1E2gqH2M + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl create svc -n psmdb-operator externalname minio-service --external-name=minio-service.pitr-physical-backup-source-16105.svc.cluster.local --tcp=9000 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.CkKd37ulWQ service/minio-service created + cat /tmp/tmp.yq1E2gqH2M + rm /tmp/tmp.CkKd37ulWQ /tmp/tmp.yq1E2gqH2M + return 0 + create_minio_bucket operator-testing + local bucket=operator-testing + kubectl_bin run -i --rm aws-cli --image=perconalab/awscli --restart=Never -- bash -c 'AWS_ACCESS_KEY_ID=some-access-key AWS_SECRET_ACCESS_KEY=some-secret-key AWS_DEFAULT_REGION=us-east-1 /usr/bin/aws --endpoint-url http://minio-service:9000 s3 mb s3://operator-testing' ++ mktemp + local LAST_OUT=/tmp/tmp.09DIYJX1Dt ++ mktemp + local LAST_ERR=/tmp/tmp.p4o3KiVcF0 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl run -i --rm aws-cli --image=perconalab/awscli --restart=Never -- bash -c 'AWS_ACCESS_KEY_ID=some-access-key AWS_SECRET_ACCESS_KEY=some-secret-key AWS_DEFAULT_REGION=us-east-1 /usr/bin/aws --endpoint-url http://minio-service:9000 s3 mb s3://operator-testing' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.09DIYJX1Dt make_bucket: operator-testing pod "aws-cli" deleted from pitr-physical-backup-source-16105 namespace + cat /tmp/tmp.p4o3KiVcF0 + rm /tmp/tmp.09DIYJX1Dt /tmp/tmp.p4o3KiVcF0 + return 0 + desc 'create secrets and start client' + set +o xtrace ----------------------------------------------------------------------------------- create secrets and start client ----------------------------------------------------------------------------------- + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2152/e2e-tests/conf/secrets.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2152/e2e-tests/conf/client.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2152/e2e-tests/conf/minio-secret.yml ++ mktemp + local LAST_OUT=/tmp/tmp.ku0zjQhOur ++ mktemp + local LAST_ERR=/tmp/tmp.7Edg5f1Tuy + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2152/e2e-tests/conf/secrets.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2152/e2e-tests/conf/client.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2152/e2e-tests/conf/minio-secret.yml + exit_status=0 + set -e + 
'[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.ku0zjQhOur secret/some-users created deployment.apps/psmdb-client created secret/minio-secret created + cat /tmp/tmp.7Edg5f1Tuy + rm /tmp/tmp.ku0zjQhOur /tmp/tmp.7Edg5f1Tuy + return 0 + cluster=some-name + desc 'create first PSMDB cluster some-name' + set +o xtrace ----------------------------------------------------------------------------------- create first PSMDB cluster some-name ----------------------------------------------------------------------------------- + apply_cluster /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2152/e2e-tests/pitr-physical-backup-source/conf/some-name-rs0.yml + '[' -z '' ']' + cat_config /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2152/e2e-tests/pitr-physical-backup-source/conf/some-name-rs0.yml + kubectl_bin apply -f - + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2152/e2e-tests/pitr-physical-backup-source/conf/some-name-rs0.yml + yq eval '(.spec | select(.image == null)).image = "perconalab/percona-server-mongodb-operator:main-mongod8.0"' ++ mktemp + yq eval '(.spec | select(has("pmm"))).pmm.image = "percona/pmm-client:2.44.1-1"' + yq eval '(.spec | select(has("initImage"))).initImage = "perconalab/percona-server-mongodb-operator:PR-2152-4461031b"' + yq eval '.spec.upgradeOptions.apply="Never"' + local LAST_OUT=/tmp/tmp.6dj4ZGrA36 + yq eval '(.spec | select(has("backup"))).backup.image = "perconalab/percona-server-mongodb-operator:main-backup"' ++ mktemp + local LAST_ERR=/tmp/tmp.NUrnPHmDJS + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.6dj4ZGrA36 perconaservermongodb.psmdb.percona.com/some-name created + cat /tmp/tmp.NUrnPHmDJS + rm /tmp/tmp.6dj4ZGrA36 /tmp/tmp.NUrnPHmDJS + return 0 + desc 'check if all 3 Pods started' + set +o xtrace ----------------------------------------------------------------------------------- check if all 3 Pods started ----------------------------------------------------------------------------------- + wait_for_running some-name-rs0 3 + local name=some-name-rs0 + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=some-name ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod some-name-rs0-0 + local pod=some-name-rs0-0 + set +o xtrace waiting for pod/some-name-rs0-0 to be ready.....OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod some-name-rs0-1 + local pod=some-name-rs0-1 + set +o xtrace waiting for pod/some-name-rs0-1 to be ready....OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.eYSmp4UIG2 +++ mktemp ++ local LAST_ERR=/tmp/tmp.YGkVx03GE6 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.eYSmp4UIG2 ++ cat /tmp/tmp.YGkVx03GE6 ++ rm /tmp/tmp.eYSmp4UIG2 /tmp/tmp.YGkVx03GE6 ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-rs0-2 + local pod=some-name-rs0-2 + set +o xtrace waiting for pod/some-name-rs0-2 to be ready.......OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.JMP2Da2jhc +++ mktemp 
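The apply_cluster step above does not apply the CR file verbatim: cat_config pipes it through a chain of yq edits that pin the mongod, init, backup and PMM images and disable automatic upgrades before handing the result to kubectl apply. Condensed into one runnable pipeline (the yq expressions and image tags are exactly those shown in the trace; collapsing them into a single pipeline is a simplification of the suite's separate processes):

cat e2e-tests/pitr-physical-backup-source/conf/some-name-rs0.yml \
  | yq eval '(.spec | select(.image == null)).image = "perconalab/percona-server-mongodb-operator:main-mongod8.0"' \
  | yq eval '(.spec | select(has("initImage"))).initImage = "perconalab/percona-server-mongodb-operator:PR-2152-4461031b"' \
  | yq eval '(.spec | select(has("backup"))).backup.image = "perconalab/percona-server-mongodb-operator:main-backup"' \
  | yq eval '(.spec | select(has("pmm"))).pmm.image = "percona/pmm-client:2.44.1-1"' \
  | yq eval '.spec.upgradeOptions.apply="Never"' \
  | kubectl apply -f -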
++ local LAST_ERR=/tmp/tmp.nOlpzzHf6a ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.JMP2Da2jhc ++ cat /tmp/tmp.nOlpzzHf6a ++ rm /tmp/tmp.JMP2Da2jhc /tmp/tmp.nOlpzzHf6a ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.oxFpWA9P2G +++ mktemp ++ local LAST_ERR=/tmp/tmp.y9D9JjF4cD ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.oxFpWA9P2G ++ cat /tmp/tmp.y9D9JjF4cD ++ rm /tmp/tmp.oxFpWA9P2G /tmp/tmp.y9D9JjF4cD ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness.............................................. + wait_for_running some-name-cfg 3 false + local name=some-name-cfg + let last_pod=2 + local check_cluster_readyness=false + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=cfg + local cluster_name=some-name ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod some-name-cfg-0 + local pod=some-name-cfg-0 + set +o xtrace waiting for pod/some-name-cfg-0 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod some-name-cfg-1 + local pod=some-name-cfg-1 + set +o xtrace waiting for pod/some-name-cfg-1 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.7dmP2hnXja +++ mktemp ++ local LAST_ERR=/tmp/tmp.a2w9MhpVCR ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.7dmP2hnXja ++ cat /tmp/tmp.a2w9MhpVCR ++ rm /tmp/tmp.7dmP2hnXja /tmp/tmp.a2w9MhpVCR ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-cfg-2 + local pod=some-name-cfg-2 + set +o xtrace waiting for pod/some-name-cfg-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.2SLsgwY2mH +++ mktemp ++ local LAST_ERR=/tmp/tmp.aLLYzzNbqR ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.2SLsgwY2mH ++ cat /tmp/tmp.aLLYzzNbqR ++ rm /tmp/tmp.2SLsgwY2mH /tmp/tmp.aLLYzzNbqR ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.5tl4TCBFWI +++ mktemp ++ local LAST_ERR=/tmp/tmp.HgyxgDUnwl ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.5tl4TCBFWI ++ cat /tmp/tmp.HgyxgDUnwl ++ rm /tmp/tmp.5tl4TCBFWI /tmp/tmp.HgyxgDUnwl ++ 
return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ false == \t\r\u\e ]] + sleep 10 + write_initial_data + desc 'create user myApp' + set +o xtrace ----------------------------------------------------------------------------------- create user myApp ----------------------------------------------------------------------------------- + run_mongos 'db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})' userAdmin:userAdmin123456@some-name-mongos.pitr-physical-backup-source-16105 + local 'command=db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})' + local uri=userAdmin:userAdmin123456@some-name-mongos.pitr-physical-backup-source-16105 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local port=27017 + local mongo_bin=mongo ++ echo .svc.cluster.local ++ awk -F: '{print $2}' + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.m2CG9szrYW +++ mktemp ++ local LAST_ERR=/tmp/tmp.BbO8XS6Si0 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.m2CG9szrYW ++ cat /tmp/tmp.BbO8XS6Si0 ++ rm /tmp/tmp.m2CG9szrYW /tmp/tmp.BbO8XS6Si0 ++ return 0 + local client_container=psmdb-client-696897d69b-9xg4t + kubectl_bin exec psmdb-client-696897d69b-9xg4t -- bash -c 'printf '\''db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})\n'\'' | mongo mongodb://userAdmin:userAdmin123456@some-name-mongos.pitr-physical-backup-source-16105.svc.cluster.local:27017/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.FUOojwAzDX ++ mktemp + local LAST_ERR=/tmp/tmp.Q3DvBsvQPb + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-9xg4t -- bash -c 'printf '\''db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})\n'\'' | mongo mongodb://userAdmin:userAdmin123456@some-name-mongos.pitr-physical-backup-source-16105.svc.cluster.local:27017/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.FUOojwAzDX Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-mongos.pitr-physical-backup-source-16105.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb Implicit session: session { "id" : UUID("62f6f9d1-976e-4901-9be7-a6259aeebf41") } Percona Server for MongoDB server version: v8.0.16-5 WARNING: shell and server versions do not match Successfully added user: { "user" : "myApp", "roles" : [ { "db" : "myApp", "role" : "readWrite" } ] } bye + cat /tmp/tmp.Q3DvBsvQPb + rm /tmp/tmp.FUOojwAzDX /tmp/tmp.Q3DvBsvQPb + return 0 + sleep 2 + write_document + local cmp_postfix= + local sleep_value=0 + log 'write initial data, read from all' + set +o xtrace [2025-12-18T18:50:30+0000] write initial data, read from all + run_mongos 'use myApp\n db.test.insert({ x: 100500 })' myApp:myPass@some-name-mongos.pitr-physical-backup-source-16105 + local 'command=use myApp\n db.test.insert({ x: 100500 })' + local uri=myApp:myPass@some-name-mongos.pitr-physical-backup-source-16105 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local port=27017 + local mongo_bin=mongo ++ echo .svc.cluster.local ++ awk -F: '{print $2}' + 
suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ANUOusWSeS +++ mktemp ++ local LAST_ERR=/tmp/tmp.41DdbuLpsg ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.ANUOusWSeS ++ cat /tmp/tmp.41DdbuLpsg ++ rm /tmp/tmp.ANUOusWSeS /tmp/tmp.41DdbuLpsg ++ return 0 + local client_container=psmdb-client-696897d69b-9xg4t + kubectl_bin exec psmdb-client-696897d69b-9xg4t -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.pitr-physical-backup-source-16105.svc.cluster.local:27017/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.OTESBzr2ZL ++ mktemp + local LAST_ERR=/tmp/tmp.bt73T54LBQ + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-9xg4t -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.pitr-physical-backup-source-16105.svc.cluster.local:27017/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.OTESBzr2ZL Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-mongos.pitr-physical-backup-source-16105.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb Implicit session: session { "id" : UUID("83eec90e-4b36-4686-8c67-e7a5fec9f3e5") } Percona Server for MongoDB server version: v8.0.16-5 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.bt73T54LBQ + rm /tmp/tmp.OTESBzr2ZL /tmp/tmp.bt73T54LBQ + return 0 + sleep 0 + compare_mongos_cmd find myApp:myPass@some-name-mongos.pitr-physical-backup-source-16105 + local command=find + local uri=myApp:myPass@some-name-mongos.pitr-physical-backup-source-16105 + local postfix= + local suffix= + local database=myApp + local collection=test + local port=27017 + local tls=false + [[ false == \t\r\u\e ]] + mongos_command=run_mongos + log 'running db.test.command() in myApp' + set +o xtrace [2025-12-18T18:50:33+0000] running db.test.command() in myApp + run_mongos 'use myApp\n db.test.find()' myApp:myPass@some-name-mongos.pitr-physical-backup-source-16105 mongodb '' '' 27017 + egrep -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-mongos.pitr-physical-backup-source-16105 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local port=27017 + local mongo_bin=mongo + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' egrep: warning: egrep is obsolescent; using grep -E ++ echo .svc.cluster.local ++ awk -F: '{print $2}' + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.AMCiJ9g9a5 +++ mktemp ++ local LAST_ERR=/tmp/tmp.87SYXOQozs ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods 
--selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.AMCiJ9g9a5 ++ cat /tmp/tmp.87SYXOQozs ++ rm /tmp/tmp.AMCiJ9g9a5 /tmp/tmp.87SYXOQozs ++ return 0 + local client_container=psmdb-client-696897d69b-9xg4t + kubectl_bin exec psmdb-client-696897d69b-9xg4t -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.pitr-physical-backup-source-16105.svc.cluster.local:27017/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.vPEcLk6haI ++ mktemp + local LAST_ERR=/tmp/tmp.VzHGN2PRFl + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-9xg4t -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.pitr-physical-backup-source-16105.svc.cluster.local:27017/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.vPEcLk6haI + cat /tmp/tmp.VzHGN2PRFl + rm /tmp/tmp.vPEcLk6haI /tmp/tmp.VzHGN2PRFl + return 0 + [[ 0 -eq 0 ]] + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2152/e2e-tests/pitr-physical-backup-source/compare/find.json /tmp/tmp.iB4fDcwwkH/find + wait_backup_agent some-name-rs0-0 + local agent_pod=some-name-rs0-0 + set +o xtrace waiting for pbm-agent to be ready in some-name-rs0-0...2025-12-18T18:49:53.000+0000 I listening for the commands + wait_backup_agent some-name-rs0-1 + local agent_pod=some-name-rs0-1 + set +o xtrace waiting for pbm-agent to be ready in some-name-rs0-1...2025-12-18T18:49:54.000+0000 I listening for the commands + wait_backup_agent some-name-rs0-2 + local agent_pod=some-name-rs0-2 + set +o xtrace waiting for pbm-agent to be ready in some-name-rs0-2...2025-12-18T18:49:53.000+0000 I listening for the commands + wait_backup_agent some-name-rs1-0 + local agent_pod=some-name-rs1-0 + set +o xtrace waiting for pbm-agent to be ready in some-name-rs1-0...2025-12-18T18:49:56.000+0000 I listening for the commands + wait_backup_agent some-name-rs1-1 + local agent_pod=some-name-rs1-1 + set +o xtrace waiting for pbm-agent to be ready in some-name-rs1-1...2025-12-18T18:49:52.000+0000 I listening for the commands + wait_backup_agent some-name-rs1-2 + local agent_pod=some-name-rs1-2 + set +o xtrace waiting for pbm-agent to be ready in some-name-rs1-2...2025-12-18T18:49:53.000+0000 I listening for the commands + wait_backup_agent some-name-rs2-0 + local agent_pod=some-name-rs2-0 + set +o xtrace waiting for pbm-agent to be ready in some-name-rs2-0...2025-12-18T18:49:55.000+0000 I listening for the commands + wait_backup_agent some-name-rs2-1 + local agent_pod=some-name-rs2-1 + set +o xtrace waiting for pbm-agent to be ready in some-name-rs2-1...2025-12-18T18:49:53.000+0000 I listening for the commands + wait_backup_agent some-name-rs2-2 + local agent_pod=some-name-rs2-2 + set +o xtrace waiting for pbm-agent to be ready in some-name-rs2-2...2025-12-18T18:49:54.000+0000 I listening for the commands + echo 'Sleeping for 360 seconds' Sleeping for 360 seconds + sleep 360 + backup_name_minio=backup-minio + desc 'restore pitr type date using backupSource' + set +o xtrace ----------------------------------------------------------------------------------- restore pitr type date using backupSource ----------------------------------------------------------------------------------- + run_backup backup-minio 1 physical + local name=backup-minio + local idx=1 + local type=physical + desc 'run backup backup-minio-1' + set +o xtrace 
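The run_backup call above templates the backup manifest with sed, substituting the backup name and type into backup-minio.yml before applying it, and wait_backup then polls the psmdb-backup object until it reports ready, as the trace below shows. A compact equivalent, assuming the conf file layout visible in the trace (the kubectl wait line is an illustrative substitute for the suite's own wait_backup polling loop):

cat e2e-tests/pitr-physical-backup-source/conf/backup-minio.yml \
  | sed -e 's/name:/name: backup-minio-1/' -e 's/type:/type: physical/' \
  | kubectl apply -f -
kubectl wait psmdb-backup/backup-minio-1 --for=jsonpath='{.status.state}'=ready --timeout=10m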
----------------------------------------------------------------------------------- run backup backup-minio-1 ----------------------------------------------------------------------------------- + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2152/e2e-tests/pitr-physical-backup-source/conf/backup-minio.yml + /usr/sbin/sed -e 's/name:/name: backup-minio-1/' + /usr/sbin/sed -e 's/type:/type: physical/' + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.53AUNA94AO ++ mktemp + local LAST_ERR=/tmp/tmp.XcoMGym7Lj + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.53AUNA94AO perconaservermongodbbackup.psmdb.percona.com/backup-minio-1 created + cat /tmp/tmp.XcoMGym7Lj + rm /tmp/tmp.53AUNA94AO /tmp/tmp.XcoMGym7Lj + return 0 + wait_backup backup-minio-1 + local backup_name=backup-minio-1 + local target_state=ready + set +o xtrace waiting for backup-minio-1 to reach ready state.........OK + compare_latest_restorable_time some-name-rs0 backup-minio-1 + local cluster=some-name-rs0 + local backup_name=backup-minio-1 + local latest_restorable_time + local backup_time ++ get_latest_restorable_time some-name-rs0 ++ local cluster=some-name-rs0 ++ local first_timestamp ++ local second_timestamp ++ local retry=0 ++ [[ '' != '' ]] +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.Top82Zh6Cm ++++ mktemp +++ local LAST_ERR=/tmp/tmp.Xn4w1RdLLb +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.Top82Zh6Cm +++ cat /tmp/tmp.Xn4w1RdLLb +++ rm /tmp/tmp.Top82Zh6Cm /tmp/tmp.Xn4w1RdLLb +++ return 0 ++ first_timestamp=null ++ sleep 5 ++ [[ null != '' ]] ++ [[ null != \n\u\l\l ]] ++ let retry+=1 ++ [[ 1 -gt 30 ]] ++ [[ null != '' ]] ++ [[ null != \n\u\l\l ]] +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.8OSuhgsZqR ++++ mktemp +++ local LAST_ERR=/tmp/tmp.Oyr7afreOt +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.8OSuhgsZqR +++ cat /tmp/tmp.Oyr7afreOt +++ rm /tmp/tmp.8OSuhgsZqR /tmp/tmp.Oyr7afreOt +++ return 0 ++ first_timestamp=null ++ sleep 5 ++ [[ null != '' ]] ++ [[ null != \n\u\l\l ]] ++ let retry+=1 ++ [[ 2 -gt 30 ]] ++ [[ null != '' ]] ++ [[ null != \n\u\l\l ]] +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.eifETa7W0T ++++ mktemp +++ local LAST_ERR=/tmp/tmp.5ax09gxOcX +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.eifETa7W0T +++ cat /tmp/tmp.5ax09gxOcX +++ rm /tmp/tmp.eifETa7W0T /tmp/tmp.5ax09gxOcX +++ return 0 ++ first_timestamp=null ++ sleep 5 ++ [[ null != '' ]] ++ [[ null != \n\u\l\l ]] ++ let retry+=1 ++ [[ 3 -gt 
30 ]] ++ [[ null != '' ]] ++ [[ null != \n\u\l\l ]] +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.BsCDCfXseD ++++ mktemp +++ local LAST_ERR=/tmp/tmp.Py9oLQG5EP +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.BsCDCfXseD +++ cat /tmp/tmp.Py9oLQG5EP +++ rm /tmp/tmp.BsCDCfXseD /tmp/tmp.Py9oLQG5EP +++ return 0 ++ first_timestamp=1766084244 ++ sleep 5 ++ [[ 1766084244 != '' ]] ++ [[ 1766084244 != \n\u\l\l ]] +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.1FzhcZw9l0 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.fQymkq6vbv +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.1FzhcZw9l0 +++ cat /tmp/tmp.fQymkq6vbv +++ rm /tmp/tmp.1FzhcZw9l0 /tmp/tmp.fQymkq6vbv +++ return 0 ++ second_timestamp=1766084244 ++ let retry+=1 ++ [[ 4 -gt 30 ]] ++ [[ 1766084244 != '' ]] ++ [[ 1766084244 != \n\u\l\l ]] ++ [[ 1766084244 == 1766084244 ]] ++ /usr/sbin/date -u -d @1766084244 +%Y-%m-%dT%H:%M:%SZ + latest_restorable_time=2025-12-18T18:57:24Z ++ get_latest_restorable_time_from_backup_object backup-minio-1 ++ local backup_name=backup-minio-1 ++ local latestRestorableTime ++ local retry=0 ++ [[ '' != '' ]] ++ sleep 5 +++ kubectl_bin get psmdb-backup backup-minio-1 -o 'jsonpath={.status.latestRestorableTime}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.Hbja6XhoHt ++++ mktemp +++ local LAST_ERR=/tmp/tmp.mmZiL4LTCM +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get psmdb-backup backup-minio-1 -o 'jsonpath={.status.latestRestorableTime}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.Hbja6XhoHt +++ cat /tmp/tmp.mmZiL4LTCM +++ rm /tmp/tmp.Hbja6XhoHt /tmp/tmp.mmZiL4LTCM +++ return 0 ++ latestRestorableTime=2025-12-18T18:57:24Z ++ let retry+=1 ++ [[ 1 -gt 30 ]] ++ [[ 2025-12-18T18:57:24Z != '' ]] ++ [[ 2025-12-18T18:57:24Z != \n\u\l\l ]] ++ echo 2025-12-18T18:57:24Z + backup_time=2025-12-18T18:57:24Z + [[ 2025-12-18T18:57:24Z != \2\0\2\5\-\1\2\-\1\8\T\1\8\:\5\7\:\2\4\Z ]] + reset_collection + desc 'reset data' + set +o xtrace ----------------------------------------------------------------------------------- reset data ----------------------------------------------------------------------------------- + run_mongos 'use myApp\n db.test.remove({})' myApp:myPass@some-name-mongos.pitr-physical-backup-source-16105 + local 'command=use myApp\n db.test.remove({})' + local uri=myApp:myPass@some-name-mongos.pitr-physical-backup-source-16105 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local port=27017 + local mongo_bin=mongo ++ echo .svc.cluster.local ++ awk -F: '{print $2}' + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Ia0MwZgTqG +++ mktemp ++ local LAST_ERR=/tmp/tmp.Qq2zIXEEaC ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 
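The compare_latest_restorable_time loop above polls pbm status inside the backup-agent container until the end timestamp of the last PITR chunk is non-null and stable across two consecutive reads, then compares it with the latestRestorableTime the operator writes into the psmdb-backup object. A sketch of that polling logic, using the same jq path and 30-iteration cap seen in the trace (the helper name and error handling are illustrative, not the suite's actual function):

get_latest_restorable_time() {
    local cluster=$1 ts=null prev= retry=0
    # keep polling until two consecutive reads return the same non-null epoch value
    until [ "$ts" != "null" ] && [ -n "$prev" ] && [ "$ts" = "$prev" ]; do
        retry=$((retry + 1))
        if [ "$retry" -gt 30 ]; then
            echo "timed out waiting for PITR chunks" >&2
            return 1
        fi
        prev=$ts
        ts=$(kubectl exec "${cluster}-0" -c backup-agent -- pbm status -o json \
            | jq '.backups.pitrChunks.pitrChunks | last | .range.end')
        sleep 5
    done
    date -u -d "@${ts}" +%Y-%m-%dT%H:%M:%SZ
}

# the result is then checked against the backup CR, e.g.:
# [ "$(get_latest_restorable_time some-name-rs0)" = "$(kubectl get psmdb-backup backup-minio-1 -o jsonpath='{.status.latestRestorableTime}')" ]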
++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Ia0MwZgTqG ++ cat /tmp/tmp.Qq2zIXEEaC ++ rm /tmp/tmp.Ia0MwZgTqG /tmp/tmp.Qq2zIXEEaC ++ return 0 + local client_container=psmdb-client-696897d69b-9xg4t + kubectl_bin exec psmdb-client-696897d69b-9xg4t -- bash -c 'printf '\''use myApp\n db.test.remove({})\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.pitr-physical-backup-source-16105.svc.cluster.local:27017/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.KBHuq5x6JT ++ mktemp + local LAST_ERR=/tmp/tmp.0TfOhArjbZ + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-9xg4t -- bash -c 'printf '\''use myApp\n db.test.remove({})\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.pitr-physical-backup-source-16105.svc.cluster.local:27017/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.KBHuq5x6JT Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-mongos.pitr-physical-backup-source-16105.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb Implicit session: session { "id" : UUID("3399ec89-4c1b-423f-8bbf-7bfa6fe2c485") } Percona Server for MongoDB server version: v8.0.16-5 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nRemoved" : 1 }) bye + cat /tmp/tmp.0TfOhArjbZ + rm /tmp/tmp.KBHuq5x6JT /tmp/tmp.0TfOhArjbZ + return 0 + sleep 2 + write_document '' 120 + local cmp_postfix= + local sleep_value=120 + log 'write initial data, read from all' + set +o xtrace [2025-12-18T18:57:54+0000] write initial data, read from all + run_mongos 'use myApp\n db.test.insert({ x: 100500 })' myApp:myPass@some-name-mongos.pitr-physical-backup-source-16105 + local 'command=use myApp\n db.test.insert({ x: 100500 })' + local uri=myApp:myPass@some-name-mongos.pitr-physical-backup-source-16105 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local port=27017 + local mongo_bin=mongo ++ echo .svc.cluster.local ++ awk -F: '{print $2}' + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.sN1Z6jTaAP +++ mktemp ++ local LAST_ERR=/tmp/tmp.Tpoz4uQDSX ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.sN1Z6jTaAP ++ cat /tmp/tmp.Tpoz4uQDSX ++ rm /tmp/tmp.sN1Z6jTaAP /tmp/tmp.Tpoz4uQDSX ++ return 0 + local client_container=psmdb-client-696897d69b-9xg4t + kubectl_bin exec psmdb-client-696897d69b-9xg4t -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.pitr-physical-backup-source-16105.svc.cluster.local:27017/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.yOHMz0OtjF ++ mktemp + local LAST_ERR=/tmp/tmp.TCVVenorqm + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-9xg4t -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.pitr-physical-backup-source-16105.svc.cluster.local:27017/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 
1 ']' + break + cat /tmp/tmp.yOHMz0OtjF Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-mongos.pitr-physical-backup-source-16105.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb Implicit session: session { "id" : UUID("f91a1215-ae62-48dd-81df-7d636a8638ad") } Percona Server for MongoDB server version: v8.0.16-5 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.TCVVenorqm + rm /tmp/tmp.yOHMz0OtjF /tmp/tmp.TCVVenorqm + return 0 + sleep 120 + compare_mongos_cmd find myApp:myPass@some-name-mongos.pitr-physical-backup-source-16105 + local command=find + local uri=myApp:myPass@some-name-mongos.pitr-physical-backup-source-16105 + local postfix= + local suffix= + local database=myApp + local collection=test + local port=27017 + local tls=false + [[ false == \t\r\u\e ]] + mongos_command=run_mongos + log 'running db.test.command() in myApp' + set +o xtrace [2025-12-18T18:59:58+0000] running db.test.command() in myApp + run_mongos 'use myApp\n db.test.find()' myApp:myPass@some-name-mongos.pitr-physical-backup-source-16105 mongodb '' '' 27017 + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-mongos.pitr-physical-backup-source-16105 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local port=27017 + local mongo_bin=mongo + egrep -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' ++ echo .svc.cluster.local ++ awk -F: '{print $2}' + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' egrep: warning: egrep is obsolescent; using grep -E +++ mktemp ++ local LAST_OUT=/tmp/tmp.8TDJVEeCTq +++ mktemp ++ local LAST_ERR=/tmp/tmp.CWR0iNINNg ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.8TDJVEeCTq ++ cat /tmp/tmp.CWR0iNINNg ++ rm /tmp/tmp.8TDJVEeCTq /tmp/tmp.CWR0iNINNg ++ return 0 + local client_container=psmdb-client-696897d69b-9xg4t + kubectl_bin exec psmdb-client-696897d69b-9xg4t -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.pitr-physical-backup-source-16105.svc.cluster.local:27017/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.M5Xgh12CWW ++ mktemp + local LAST_ERR=/tmp/tmp.dcsG60yCCM + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-9xg4t -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.pitr-physical-backup-source-16105.svc.cluster.local:27017/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.M5Xgh12CWW + cat /tmp/tmp.dcsG60yCCM + rm /tmp/tmp.M5Xgh12CWW /tmp/tmp.dcsG60yCCM + return 0 + [[ 0 -eq 0 ]] + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2152/e2e-tests/pitr-physical-backup-source/compare/find.json /tmp/tmp.iB4fDcwwkH/find ++ run_mongos 'new Date().getTime() / 1000' myApp:myPass@some-name-mongos.pitr-physical-backup-source-16105 
mongodb '' --quiet ++ local 'command=new Date().getTime() / 1000' ++ local uri=myApp:myPass@some-name-mongos.pitr-physical-backup-source-16105 ++ grep -E -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match' ++ local driver=mongodb ++ local suffix=.svc.cluster.local ++ local mongo_flag=--quiet ++ local port=27017 ++ local mongo_bin=mongo ++ cut -d. -f1 +++ awk -F: '{print $2}' +++ echo .svc.cluster.local ++ suffix_port= ++ [[ -z '' ]] ++ suffix=.svc.cluster.local:27017 +++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.pbBJV0C209 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.eEZAo8XJBO +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.pbBJV0C209 +++ cat /tmp/tmp.eEZAo8XJBO +++ rm /tmp/tmp.pbBJV0C209 /tmp/tmp.eEZAo8XJBO +++ return 0 ++ local client_container=psmdb-client-696897d69b-9xg4t ++ kubectl_bin exec psmdb-client-696897d69b-9xg4t -- bash -c 'printf '\''new Date().getTime() / 1000\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.pitr-physical-backup-source-16105.svc.cluster.local:27017/admin --quiet' +++ mktemp ++ local LAST_OUT=/tmp/tmp.U6yWpzIDbh +++ mktemp ++ local LAST_ERR=/tmp/tmp.ZSmMDQ2HK7 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl exec psmdb-client-696897d69b-9xg4t -- bash -c 'printf '\''new Date().getTime() / 1000\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.pitr-physical-backup-source-16105.svc.cluster.local:27017/admin --quiet' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.U6yWpzIDbh ++ cat /tmp/tmp.ZSmMDQ2HK7 ++ rm /tmp/tmp.U6yWpzIDbh /tmp/tmp.ZSmMDQ2HK7 ++ return 0 + time_now=1766084403 + check_recovery backup-minio-1 date 1766084403 '' some-name backupSource + local backup_name=backup-minio-1 + local restore_type=date + local restore_date=1766084403 + local cmp_postfix= + local cluster_name=some-name + local backupSource=backupSource ++ get_latest_oplog_chunk_ts some-name ++ local cluster=some-name +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.gmSwiwFvce ++++ mktemp +++ local LAST_ERR=/tmp/tmp.z7SRwEJiKv +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.gmSwiwFvce +++ cat /tmp/tmp.z7SRwEJiKv +++ rm /tmp/tmp.gmSwiwFvce /tmp/tmp.z7SRwEJiKv +++ return 0 ++ echo 1766084370 + local latest_ts=1766084370 + desc 'write more data before restore by date' + set +o xtrace ----------------------------------------------------------------------------------- write more data before restore by date ----------------------------------------------------------------------------------- + run_mongos 'use myApp\n db.test.insert({ x: 100501 })' myApp:myPass@some-name-mongos.pitr-physical-backup-source-16105 + local 'command=use myApp\n db.test.insert({ x: 100501 })' + local uri=myApp:myPass@some-name-mongos.pitr-physical-backup-source-16105 + local driver=mongodb + local 
suffix=.svc.cluster.local + local mongo_flag= + local port=27017 + local mongo_bin=mongo ++ echo .svc.cluster.local ++ awk -F: '{print $2}' + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.hqeYePLg9d +++ mktemp ++ local LAST_ERR=/tmp/tmp.MV2UMp65b2 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.hqeYePLg9d ++ cat /tmp/tmp.MV2UMp65b2 ++ rm /tmp/tmp.hqeYePLg9d /tmp/tmp.MV2UMp65b2 ++ return 0 + local client_container=psmdb-client-696897d69b-9xg4t + kubectl_bin exec psmdb-client-696897d69b-9xg4t -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100501 })\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.pitr-physical-backup-source-16105.svc.cluster.local:27017/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.6BvDkPQ8dP ++ mktemp + local LAST_ERR=/tmp/tmp.83lp3mDuS9 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-9xg4t -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100501 })\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.pitr-physical-backup-source-16105.svc.cluster.local:27017/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.6BvDkPQ8dP Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-mongos.pitr-physical-backup-source-16105.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb Implicit session: session { "id" : UUID("11626f72-ca46-40cf-986b-9acc26a55920") } Percona Server for MongoDB server version: v8.0.16-5 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.83lp3mDuS9 + rm /tmp/tmp.6BvDkPQ8dP /tmp/tmp.83lp3mDuS9 + return 0 + [[ -n 1766084403 ]] ++ format_date 1766084403 ++ local timestamp=1766084403 +++ TZ=UTC +++ /usr/sbin/date -d@1766084403 '+%Y-%m-%d %H:%M:%S' ++ echo 2025-12-18 19:00:03 + desc 'Restoring to time 2025-12-18 19:00:03' + set +o xtrace ----------------------------------------------------------------------------------- Restoring to time 2025-12-18 19:00:03 ----------------------------------------------------------------------------------- + retries=0 + [[ 1766084370 -gt 1766084403 ]] + [[ 0 -gt 30 ]] ++ get_latest_oplog_chunk_ts some-name ++ local cluster=some-name +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json ++++ mktemp +++ local LAST_OUT=/tmp/tmp.nRcqd7IO0U ++++ mktemp +++ local LAST_ERR=/tmp/tmp.Wuq506CF4f +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.nRcqd7IO0U +++ cat /tmp/tmp.Wuq506CF4f +++ rm /tmp/tmp.nRcqd7IO0U /tmp/tmp.Wuq506CF4f +++ return 0 ++ echo 1766084370 + latest_ts=1766084370 + retries=1 ++ format_date 1766084370 ++ local timestamp=1766084370 +++ TZ=UTC +++ /usr/sbin/date -d@1766084370 '+%Y-%m-%d %H:%M:%S' ++ echo 2025-12-18 18:59:30 ++ format_date 1766084403 ++ local timestamp=1766084403 +++ TZ=UTC +++ /usr/sbin/date -d@1766084403 '+%Y-%m-%d %H:%M:%S' ++ echo 2025-12-18 
19:00:03 + echo 'Waiting for last oplog chunk (2025-12-18 18:59:30) to be greater than restore target (2025-12-18 19:00:03)' Waiting for last oplog chunk (2025-12-18 18:59:30) to be greater than restore target (2025-12-18 19:00:03) + sleep 10 + [[ 1766084370 -gt 1766084403 ]] + [[ 1 -gt 30 ]] ++ get_latest_oplog_chunk_ts some-name ++ local cluster=some-name +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.Br3gXotbNG ++++ mktemp +++ local LAST_ERR=/tmp/tmp.pZT9je3Rg4 +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.Br3gXotbNG +++ cat /tmp/tmp.pZT9je3Rg4 +++ rm /tmp/tmp.Br3gXotbNG /tmp/tmp.pZT9je3Rg4 +++ return 0 ++ echo 1766084370 + latest_ts=1766084370 + retries=2 ++ format_date 1766084370 ++ local timestamp=1766084370 +++ TZ=UTC +++ /usr/sbin/date -d@1766084370 '+%Y-%m-%d %H:%M:%S' ++ echo 2025-12-18 18:59:30 ++ format_date 1766084403 ++ local timestamp=1766084403 +++ TZ=UTC +++ /usr/sbin/date -d@1766084403 '+%Y-%m-%d %H:%M:%S' ++ echo 2025-12-18 19:00:03 + echo 'Waiting for last oplog chunk (2025-12-18 18:59:30) to be greater than restore target (2025-12-18 19:00:03)' Waiting for last oplog chunk (2025-12-18 18:59:30) to be greater than restore target (2025-12-18 19:00:03) + sleep 10 + [[ 1766084370 -gt 1766084403 ]] + [[ 2 -gt 30 ]] ++ get_latest_oplog_chunk_ts some-name ++ local cluster=some-name +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.L05a4wH7pW ++++ mktemp +++ local LAST_ERR=/tmp/tmp.LIXDmD134D +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.L05a4wH7pW +++ cat /tmp/tmp.LIXDmD134D +++ rm /tmp/tmp.L05a4wH7pW /tmp/tmp.LIXDmD134D +++ return 0 ++ echo 1766084370 + latest_ts=1766084370 + retries=3 ++ format_date 1766084370 ++ local timestamp=1766084370 +++ TZ=UTC +++ /usr/sbin/date -d@1766084370 '+%Y-%m-%d %H:%M:%S' ++ echo 2025-12-18 18:59:30 ++ format_date 1766084403 ++ local timestamp=1766084403 +++ TZ=UTC +++ /usr/sbin/date -d@1766084403 '+%Y-%m-%d %H:%M:%S' ++ echo 2025-12-18 19:00:03 + echo 'Waiting for last oplog chunk (2025-12-18 18:59:30) to be greater than restore target (2025-12-18 19:00:03)' Waiting for last oplog chunk (2025-12-18 18:59:30) to be greater than restore target (2025-12-18 19:00:03) + sleep 10 + [[ 1766084370 -gt 1766084403 ]] + [[ 3 -gt 30 ]] ++ get_latest_oplog_chunk_ts some-name ++ local cluster=some-name +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.g24avNuWZw ++++ mktemp +++ local LAST_ERR=/tmp/tmp.0ViCtOsTk4 +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.g24avNuWZw +++ cat /tmp/tmp.0ViCtOsTk4 +++ rm /tmp/tmp.g24avNuWZw /tmp/tmp.0ViCtOsTk4 +++ return 0 ++ echo 1766084370 + 
latest_ts=1766084370 + retries=4 ++ format_date 1766084370 ++ local timestamp=1766084370 +++ TZ=UTC +++ /usr/sbin/date -d@1766084370 '+%Y-%m-%d %H:%M:%S' ++ echo 2025-12-18 18:59:30 ++ format_date 1766084403 ++ local timestamp=1766084403 +++ TZ=UTC +++ /usr/sbin/date -d@1766084403 '+%Y-%m-%d %H:%M:%S' ++ echo 2025-12-18 19:00:03 + echo 'Waiting for last oplog chunk (2025-12-18 18:59:30) to be greater than restore target (2025-12-18 19:00:03)' Waiting for last oplog chunk (2025-12-18 18:59:30) to be greater than restore target (2025-12-18 19:00:03) + sleep 10 + [[ 1766084370 -gt 1766084403 ]] + [[ 4 -gt 30 ]] ++ get_latest_oplog_chunk_ts some-name ++ local cluster=some-name +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.6RkTDgwJUL ++++ mktemp +++ local LAST_ERR=/tmp/tmp.o9vIGNFjL4 +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.6RkTDgwJUL +++ cat /tmp/tmp.o9vIGNFjL4 +++ rm /tmp/tmp.6RkTDgwJUL /tmp/tmp.o9vIGNFjL4 +++ return 0 ++ echo 1766084370 + latest_ts=1766084370 + retries=5 ++ format_date 1766084370 ++ local timestamp=1766084370 +++ TZ=UTC +++ /usr/sbin/date -d@1766084370 '+%Y-%m-%d %H:%M:%S' ++ echo 2025-12-18 18:59:30 ++ format_date 1766084403 ++ local timestamp=1766084403 +++ TZ=UTC +++ /usr/sbin/date -d@1766084403 '+%Y-%m-%d %H:%M:%S' ++ echo 2025-12-18 19:00:03 + echo 'Waiting for last oplog chunk (2025-12-18 18:59:30) to be greater than restore target (2025-12-18 19:00:03)' Waiting for last oplog chunk (2025-12-18 18:59:30) to be greater than restore target (2025-12-18 19:00:03) + sleep 10 + [[ 1766084370 -gt 1766084403 ]] + [[ 5 -gt 30 ]] ++ get_latest_oplog_chunk_ts some-name ++ local cluster=some-name +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.qVcAg6AQ8b ++++ mktemp +++ local LAST_ERR=/tmp/tmp.CzgEPT2i6p +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.qVcAg6AQ8b +++ cat /tmp/tmp.CzgEPT2i6p +++ rm /tmp/tmp.qVcAg6AQ8b /tmp/tmp.CzgEPT2i6p +++ return 0 ++ echo 1766084370 + latest_ts=1766084370 + retries=6 ++ format_date 1766084370 ++ local timestamp=1766084370 +++ TZ=UTC +++ /usr/sbin/date -d@1766084370 '+%Y-%m-%d %H:%M:%S' ++ echo 2025-12-18 18:59:30 ++ format_date 1766084403 ++ local timestamp=1766084403 +++ TZ=UTC +++ /usr/sbin/date -d@1766084403 '+%Y-%m-%d %H:%M:%S' ++ echo 2025-12-18 19:00:03 + echo 'Waiting for last oplog chunk (2025-12-18 18:59:30) to be greater than restore target (2025-12-18 19:00:03)' Waiting for last oplog chunk (2025-12-18 18:59:30) to be greater than restore target (2025-12-18 19:00:03) + sleep 10 + [[ 1766084370 -gt 1766084403 ]] + [[ 6 -gt 30 ]] ++ get_latest_oplog_chunk_ts some-name ++ local cluster=some-name +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.UgYkuMJf1x ++++ mktemp +++ local LAST_ERR=/tmp/tmp.F5LaC2H8XU +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ 
for i in $(seq 0 2) +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.UgYkuMJf1x +++ cat /tmp/tmp.F5LaC2H8XU +++ rm /tmp/tmp.UgYkuMJf1x /tmp/tmp.F5LaC2H8XU +++ return 0 ++ echo 1766084370 + latest_ts=1766084370 + retries=7 ++ format_date 1766084370 ++ local timestamp=1766084370 +++ TZ=UTC +++ /usr/sbin/date -d@1766084370 '+%Y-%m-%d %H:%M:%S' ++ echo 2025-12-18 18:59:30 ++ format_date 1766084403 ++ local timestamp=1766084403 +++ TZ=UTC +++ /usr/sbin/date -d@1766084403 '+%Y-%m-%d %H:%M:%S' ++ echo 2025-12-18 19:00:03 + echo 'Waiting for last oplog chunk (2025-12-18 18:59:30) to be greater than restore target (2025-12-18 19:00:03)' Waiting for last oplog chunk (2025-12-18 18:59:30) to be greater than restore target (2025-12-18 19:00:03) + sleep 10 + [[ 1766084370 -gt 1766084403 ]] + [[ 7 -gt 30 ]] ++ get_latest_oplog_chunk_ts some-name ++ local cluster=some-name +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.g37daRZR2L ++++ mktemp +++ local LAST_ERR=/tmp/tmp.55cfYKD193 +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.g37daRZR2L +++ cat /tmp/tmp.55cfYKD193 +++ rm /tmp/tmp.g37daRZR2L /tmp/tmp.55cfYKD193 +++ return 0 ++ echo 1766084370 + latest_ts=1766084370 + retries=8 ++ format_date 1766084370 ++ local timestamp=1766084370 +++ TZ=UTC +++ /usr/sbin/date -d@1766084370 '+%Y-%m-%d %H:%M:%S' ++ echo 2025-12-18 18:59:30 ++ format_date 1766084403 ++ local timestamp=1766084403 +++ TZ=UTC +++ /usr/sbin/date -d@1766084403 '+%Y-%m-%d %H:%M:%S' ++ echo 2025-12-18 19:00:03 + echo 'Waiting for last oplog chunk (2025-12-18 18:59:30) to be greater than restore target (2025-12-18 19:00:03)' Waiting for last oplog chunk (2025-12-18 18:59:30) to be greater than restore target (2025-12-18 19:00:03) + sleep 10 + [[ 1766084370 -gt 1766084403 ]] + [[ 8 -gt 30 ]] ++ get_latest_oplog_chunk_ts some-name ++ local cluster=some-name +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.qkjI2ZkdEx ++++ mktemp +++ local LAST_ERR=/tmp/tmp.Z7CYqxuTHj +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.qkjI2ZkdEx +++ cat /tmp/tmp.Z7CYqxuTHj +++ rm /tmp/tmp.qkjI2ZkdEx /tmp/tmp.Z7CYqxuTHj +++ return 0 ++ echo 1766084382 + latest_ts=1766084382 + retries=9 ++ format_date 1766084382 ++ local timestamp=1766084382 +++ TZ=UTC +++ /usr/sbin/date -d@1766084382 '+%Y-%m-%d %H:%M:%S' ++ echo 2025-12-18 18:59:42 ++ format_date 1766084403 ++ local timestamp=1766084403 +++ TZ=UTC +++ /usr/sbin/date -d@1766084403 '+%Y-%m-%d %H:%M:%S' ++ echo 2025-12-18 19:00:03 + echo 'Waiting for last oplog chunk (2025-12-18 18:59:42) to be greater than restore target (2025-12-18 19:00:03)' Waiting for last oplog chunk (2025-12-18 18:59:42) to be greater than restore target (2025-12-18 19:00:03) + sleep 10 + [[ 1766084382 -gt 1766084403 ]] + [[ 9 -gt 30 ]] ++ get_latest_oplog_chunk_ts some-name ++ local 
cluster=some-name +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.lwrVRMtcqn ++++ mktemp +++ local LAST_ERR=/tmp/tmp.vx0EN8IOn8 +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.lwrVRMtcqn +++ cat /tmp/tmp.vx0EN8IOn8 +++ rm /tmp/tmp.lwrVRMtcqn /tmp/tmp.vx0EN8IOn8 +++ return 0 ++ echo 1766084491 + latest_ts=1766084491 + retries=10 ++ format_date 1766084491 ++ local timestamp=1766084491 +++ TZ=UTC +++ /usr/sbin/date -d@1766084491 '+%Y-%m-%d %H:%M:%S' ++ echo 2025-12-18 19:01:31 ++ format_date 1766084403 ++ local timestamp=1766084403 +++ TZ=UTC +++ /usr/sbin/date -d@1766084403 '+%Y-%m-%d %H:%M:%S' ++ echo 2025-12-18 19:00:03 + echo 'Waiting for last oplog chunk (2025-12-18 19:01:31) to be greater than restore target (2025-12-18 19:00:03)' Waiting for last oplog chunk (2025-12-18 19:01:31) to be greater than restore target (2025-12-18 19:00:03) + sleep 10 + [[ 1766084491 -gt 1766084403 ]] + '[' -z backupSource ']' + desc 'check restore by date backupSource' + set +o xtrace ----------------------------------------------------------------------------------- check restore by date backupSource ----------------------------------------------------------------------------------- ++ get_backup_dest backup-minio-1 ++ local backup_name=backup-minio-1 ++ kubectl_bin get psmdb-backup backup-minio-1 -o 'jsonpath={.status.destination}' ++ sed -e 's/.json$//' +++ mktemp ++ sed 's|s3://||' ++ sed 's|azure://||' ++ local LAST_OUT=/tmp/tmp.32hefGsTSf ++ sed 's|gs://||' +++ mktemp ++ local LAST_ERR=/tmp/tmp.RHHUFp7LbI ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb-backup backup-minio-1 -o 'jsonpath={.status.destination}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.32hefGsTSf ++ cat /tmp/tmp.RHHUFp7LbI ++ rm /tmp/tmp.32hefGsTSf /tmp/tmp.RHHUFp7LbI ++ return 0 + backup_dest=operator-testing/2025-12-18T18:56:53Z + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2152/e2e-tests/pitr-physical-backup-source/conf/restore.yml + /usr/sbin/sed -e 's/name:/name: restore-backup-minio-1/' + /usr/sbin/sed -e /backupName/d + '[' -z 1766084403 ']' ++ format_date 1766084403 + /usr/sbin/sed -e 's/pitrType:/type: date/' ++ local timestamp=1766084403 + /usr/sbin/sed -e 's|DESTINATION|operator-testing/2025-12-18T18:56:53Z|' +++ TZ=UTC +++ /usr/sbin/date -d@1766084403 '+%Y-%m-%d %H:%M:%S' + '[' -n '' ']' + yq + kubectl_bin apply -f - ++ get_bucket_name backup-minio-1 ++ local backup_name=backup-minio-1 ++ kubectl_bin get psmdb-backup backup-minio-1 -o 'jsonpath={.status.s3.bucket}' +++ mktemp ++ echo 2025-12-18 19:00:03 + /usr/sbin/sed -e 's/date:/date: 2025-12-18 19:00:03/' ++ local LAST_OUT=/tmp/tmp.sT9huK14Wd +++ mktemp ++ mktemp ++ local LAST_ERR=/tmp/tmp.2ck1qTJNiM ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 + local LAST_OUT=/tmp/tmp.S31B6icPKi ++ mktemp ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb-backup backup-minio-1 -o 'jsonpath={.status.s3.bucket}' + local LAST_ERR=/tmp/tmp.KMLvP5JPEF + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.sT9huK14Wd ++ cat 
/tmp/tmp.2ck1qTJNiM ++ rm /tmp/tmp.sT9huK14Wd /tmp/tmp.2ck1qTJNiM ++ return 0 + /usr/sbin/sed -e 's|BUCKET-NAME|operator-testing|' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.S31B6icPKi perconaservermongodbrestore.psmdb.percona.com/restore-backup-minio-1 created + cat /tmp/tmp.KMLvP5JPEF + rm /tmp/tmp.S31B6icPKi /tmp/tmp.KMLvP5JPEF + return 0 + wait_restore backup-minio-1 some-name requested 0 1200 + local backup_name=backup-minio-1 + local cluster_name=some-name + local target_state=requested + local wait_cluster_consistency=0 + local wait_time=1200 + local ok_if_ready=0 + set +o xtrace Waiting for the psmdb-restore/restore-backup-minio-1 object to be created.OK Waiting psmdb-restore/restore-backup-minio-1 to reach state "requested" .........OK after 8 minutes + [[ 0 -eq 1 ]] + echo + wait_restore backup-minio-1 some-name ready 0 1600 + local backup_name=backup-minio-1 + local cluster_name=some-name + local target_state=ready + local wait_cluster_consistency=0 + local wait_time=1600 + local ok_if_ready=0 + set +o xtrace Waiting for the psmdb-restore/restore-backup-minio-1 object to be created.OK Waiting psmdb-restore/restore-backup-minio-1 to reach state "ready" ...OK after 2 minutes + [[ 0 -eq 1 ]] + echo + set -o xtrace + wait_for_running some-name-rs0 3 + local name=some-name-rs0 + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=some-name ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod some-name-rs0-0 + local pod=some-name-rs0-0 + set +o xtrace waiting for pod/some-name-rs0-0 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod some-name-rs0-1 + local pod=some-name-rs0-1 + set +o xtrace waiting for pod/some-name-rs0-1 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.v6IwwzC6IH +++ mktemp ++ local LAST_ERR=/tmp/tmp.qlP4YANcZV ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.v6IwwzC6IH ++ cat /tmp/tmp.qlP4YANcZV ++ rm /tmp/tmp.v6IwwzC6IH /tmp/tmp.qlP4YANcZV ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-rs0-2 + local pod=some-name-rs0-2 + set +o xtrace waiting for pod/some-name-rs0-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ZB9VMsjjhc +++ mktemp ++ local LAST_ERR=/tmp/tmp.WlfS0M50Wm ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.ZB9VMsjjhc ++ cat /tmp/tmp.WlfS0M50Wm ++ rm /tmp/tmp.ZB9VMsjjhc /tmp/tmp.WlfS0M50Wm ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.rBMhwzVALN +++ mktemp ++ local LAST_ERR=/tmp/tmp.iTt3PFoqi2 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' ++ exit_status=0 ++ set 
-e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.rBMhwzVALN ++ cat /tmp/tmp.iTt3PFoqi2 ++ rm /tmp/tmp.rBMhwzVALN /tmp/tmp.iTt3PFoqi2 ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness............................................ + wait_for_running some-name-cfg 3 + local name=some-name-cfg + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=cfg + local cluster_name=some-name ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod some-name-cfg-0 + local pod=some-name-cfg-0 + set +o xtrace waiting for pod/some-name-cfg-0 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod some-name-cfg-1 + local pod=some-name-cfg-1 + set +o xtrace waiting for pod/some-name-cfg-1 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.fZap1XwaPg +++ mktemp ++ local LAST_ERR=/tmp/tmp.61LRB5j2cs ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.fZap1XwaPg ++ cat /tmp/tmp.61LRB5j2cs ++ rm /tmp/tmp.fZap1XwaPg /tmp/tmp.61LRB5j2cs ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-cfg-2 + local pod=some-name-cfg-2 + set +o xtrace waiting for pod/some-name-cfg-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.y3dqbws9t9 +++ mktemp ++ local LAST_ERR=/tmp/tmp.KEAXkZIc3L ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.y3dqbws9t9 ++ cat /tmp/tmp.KEAXkZIc3L ++ rm /tmp/tmp.y3dqbws9t9 /tmp/tmp.KEAXkZIc3L ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.j7TMLf03o4 +++ mktemp ++ local LAST_ERR=/tmp/tmp.3VwLBelf9Z ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.j7TMLf03o4 ++ cat /tmp/tmp.3VwLBelf9Z ++ rm /tmp/tmp.j7TMLf03o4 /tmp/tmp.3VwLBelf9Z ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness + wait_for_running some-name-mongos 3 + local name=some-name-mongos + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=mongos + local cluster_name=some-name ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod some-name-mongos-0 + local pod=some-name-mongos-0 + set +o xtrace waiting for pod/some-name-mongos-0 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod some-name-mongos-1 + local pod=some-name-mongos-1 + set +o xtrace waiting for pod/some-name-mongos-1 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].arbiter.enabled}' +++ 
mktemp ++ local LAST_OUT=/tmp/tmp.eTESPafOld +++ mktemp ++ local LAST_ERR=/tmp/tmp.F9BWAxsBHU ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.eTESPafOld ++ cat /tmp/tmp.F9BWAxsBHU ++ rm /tmp/tmp.eTESPafOld /tmp/tmp.F9BWAxsBHU ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-mongos-2 + local pod=some-name-mongos-2 + set +o xtrace waiting for pod/some-name-mongos-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.5L8UDDSlGf +++ mktemp ++ local LAST_ERR=/tmp/tmp.IvFZPHhWnc ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.5L8UDDSlGf ++ cat /tmp/tmp.IvFZPHhWnc ++ rm /tmp/tmp.5L8UDDSlGf /tmp/tmp.IvFZPHhWnc ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.AwyywNzHxC +++ mktemp ++ local LAST_ERR=/tmp/tmp.DUGfqXNqk5 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.AwyywNzHxC ++ cat /tmp/tmp.DUGfqXNqk5 ++ rm /tmp/tmp.AwyywNzHxC /tmp/tmp.DUGfqXNqk5 ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness + sleep 10 + compare_mongos_cmd find myApp:myPass@some-name-mongos.pitr-physical-backup-source-16105 '' + local command=find + local uri=myApp:myPass@some-name-mongos.pitr-physical-backup-source-16105 + local postfix= + local suffix= + local database=myApp + local collection=test + local port=27017 + local tls=false + [[ false == \t\r\u\e ]] + mongos_command=run_mongos + log 'running db.test.command() in myApp' + set +o xtrace [2025-12-18T19:15:52+0000] running db.test.command() in myApp + run_mongos 'use myApp\n db.test.find()' myApp:myPass@some-name-mongos.pitr-physical-backup-source-16105 mongodb '' '' 27017 + local 'command=use myApp\n db.test.find()' + egrep -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' + local uri=myApp:myPass@some-name-mongos.pitr-physical-backup-source-16105 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local port=27017 + local mongo_bin=mongo + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ echo .svc.cluster.local ++ awk -F: '{print $2}' egrep: warning: egrep is obsolescent; using grep -E + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Q1Y7PlzYLR +++ mktemp ++ local LAST_ERR=/tmp/tmp.nF94m8KNVt ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 
'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Q1Y7PlzYLR ++ cat /tmp/tmp.nF94m8KNVt ++ rm /tmp/tmp.Q1Y7PlzYLR /tmp/tmp.nF94m8KNVt ++ return 0 + local client_container=psmdb-client-696897d69b-9xg4t + kubectl_bin exec psmdb-client-696897d69b-9xg4t -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.pitr-physical-backup-source-16105.svc.cluster.local:27017/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.jbMz2FmM5e ++ mktemp + local LAST_ERR=/tmp/tmp.g42bTOWpQU + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-9xg4t -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.pitr-physical-backup-source-16105.svc.cluster.local:27017/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.jbMz2FmM5e + cat /tmp/tmp.g42bTOWpQU + rm /tmp/tmp.jbMz2FmM5e /tmp/tmp.g42bTOWpQU + return 0 + [[ 0 -eq 0 ]] + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2152/e2e-tests/pitr-physical-backup-source/compare/find.json /tmp/tmp.iB4fDcwwkH/find + desc 'delete PSMDB cluster some-name' + set +o xtrace ----------------------------------------------------------------------------------- delete PSMDB cluster some-name ----------------------------------------------------------------------------------- + kubectl_bin delete psmdb some-name ++ mktemp + local LAST_OUT=/tmp/tmp.sTBWCOHIEE ++ mktemp + local LAST_ERR=/tmp/tmp.QrqvztF58Z + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete psmdb some-name + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.sTBWCOHIEE perconaservermongodb.psmdb.percona.com "some-name" deleted from pitr-physical-backup-source-16105 namespace + cat /tmp/tmp.QrqvztF58Z + rm /tmp/tmp.sTBWCOHIEE /tmp/tmp.QrqvztF58Z + return 0 + kubectl_bin delete pvc -l app.kubernetes.io/managed-by=percona-server-mongodb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.2f0A3gbtDu ++ mktemp + local LAST_ERR=/tmp/tmp.grACTNQdxB + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete pvc -l app.kubernetes.io/managed-by=percona-server-mongodb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.2f0A3gbtDu persistentvolumeclaim "mongod-data-some-name-cfg-0" deleted from pitr-physical-backup-source-16105 namespace persistentvolumeclaim "mongod-data-some-name-cfg-1" deleted from pitr-physical-backup-source-16105 namespace persistentvolumeclaim "mongod-data-some-name-cfg-2" deleted from pitr-physical-backup-source-16105 namespace persistentvolumeclaim "mongod-data-some-name-rs0-0" deleted from pitr-physical-backup-source-16105 namespace persistentvolumeclaim "mongod-data-some-name-rs0-1" deleted from pitr-physical-backup-source-16105 namespace persistentvolumeclaim "mongod-data-some-name-rs0-2" deleted from pitr-physical-backup-source-16105 namespace persistentvolumeclaim "mongod-data-some-name-rs1-0" deleted from pitr-physical-backup-source-16105 namespace persistentvolumeclaim "mongod-data-some-name-rs1-1" deleted from pitr-physical-backup-source-16105 namespace persistentvolumeclaim "mongod-data-some-name-rs1-2" deleted from pitr-physical-backup-source-16105 namespace persistentvolumeclaim "mongod-data-some-name-rs2-0" deleted from pitr-physical-backup-source-16105 namespace persistentvolumeclaim 
"mongod-data-some-name-rs2-1" deleted from pitr-physical-backup-source-16105 namespace persistentvolumeclaim "mongod-data-some-name-rs2-2" deleted from pitr-physical-backup-source-16105 namespace + cat /tmp/tmp.grACTNQdxB + rm /tmp/tmp.2f0A3gbtDu /tmp/tmp.grACTNQdxB + return 0 + sleep 10 + desc 'recreate PSMDB cluster some-name' + set +o xtrace ----------------------------------------------------------------------------------- recreate PSMDB cluster some-name ----------------------------------------------------------------------------------- + desc 'create second PSMDB cluster some-name' + set +o xtrace ----------------------------------------------------------------------------------- create second PSMDB cluster some-name ----------------------------------------------------------------------------------- + apply_cluster /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2152/e2e-tests/pitr-physical-backup-source/conf/some-name-rs0.yml + '[' -z '' ']' + cat_config /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2152/e2e-tests/pitr-physical-backup-source/conf/some-name-rs0.yml + kubectl_bin apply -f - ++ mktemp + yq eval '(.spec | select(has("pmm"))).pmm.image = "percona/pmm-client:2.44.1-1"' + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2152/e2e-tests/pitr-physical-backup-source/conf/some-name-rs0.yml + yq eval '.spec.upgradeOptions.apply="Never"' + yq eval '(.spec | select(has("backup"))).backup.image = "perconalab/percona-server-mongodb-operator:main-backup"' + yq eval '(.spec | select(has("initImage"))).initImage = "perconalab/percona-server-mongodb-operator:PR-2152-4461031b"' + yq eval '(.spec | select(.image == null)).image = "perconalab/percona-server-mongodb-operator:main-mongod8.0"' + local LAST_OUT=/tmp/tmp.uuIeERE28H ++ mktemp + local LAST_ERR=/tmp/tmp.FGqGXF8NS0 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.uuIeERE28H perconaservermongodb.psmdb.percona.com/some-name created + cat /tmp/tmp.FGqGXF8NS0 + rm /tmp/tmp.uuIeERE28H /tmp/tmp.FGqGXF8NS0 + return 0 + desc 'check if all 3 Pods started' + set +o xtrace ----------------------------------------------------------------------------------- check if all 3 Pods started ----------------------------------------------------------------------------------- + wait_for_running some-name-rs0 3 + local name=some-name-rs0 + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=some-name ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod some-name-rs0-0 + local pod=some-name-rs0-0 + set +o xtrace waiting for pod/some-name-rs0-0 to be ready......OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod some-name-rs0-1 + local pod=some-name-rs0-1 + set +o xtrace waiting for pod/some-name-rs0-1 to be ready......OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.dtbModjBB6 +++ mktemp ++ local LAST_ERR=/tmp/tmp.MbLKg98aXV ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.dtbModjBB6 ++ cat /tmp/tmp.MbLKg98aXV ++ rm /tmp/tmp.dtbModjBB6 /tmp/tmp.MbLKg98aXV ++ return 0 + [[ '' 
== \t\r\u\e ]] + wait_pod some-name-rs0-2 + local pod=some-name-rs0-2 + set +o xtrace waiting for pod/some-name-rs0-2 to be ready........OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.IF4kGpv7XD +++ mktemp ++ local LAST_ERR=/tmp/tmp.Kg9Q9n5nlo ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.IF4kGpv7XD ++ cat /tmp/tmp.Kg9Q9n5nlo ++ rm /tmp/tmp.IF4kGpv7XD /tmp/tmp.Kg9Q9n5nlo ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ImaIYRnce8 +++ mktemp ++ local LAST_ERR=/tmp/tmp.ZqvTNMIjtF ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.ImaIYRnce8 ++ cat /tmp/tmp.ZqvTNMIjtF ++ rm /tmp/tmp.ImaIYRnce8 /tmp/tmp.ZqvTNMIjtF ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness.............................................. + wait_for_running some-name-cfg 3 false + local name=some-name-cfg + let last_pod=2 + local check_cluster_readyness=false + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=cfg + local cluster_name=some-name ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod some-name-cfg-0 + local pod=some-name-cfg-0 + set +o xtrace waiting for pod/some-name-cfg-0 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod some-name-cfg-1 + local pod=some-name-cfg-1 + set +o xtrace waiting for pod/some-name-cfg-1 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.drt75wFHvg +++ mktemp ++ local LAST_ERR=/tmp/tmp.CsBT2wYelS ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.drt75wFHvg ++ cat /tmp/tmp.CsBT2wYelS ++ rm /tmp/tmp.drt75wFHvg /tmp/tmp.CsBT2wYelS ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-cfg-2 + local pod=some-name-cfg-2 + set +o xtrace waiting for pod/some-name-cfg-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Y1wa5HfmC1 +++ mktemp ++ local LAST_ERR=/tmp/tmp.YAZ1sFKblN ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Y1wa5HfmC1 ++ cat /tmp/tmp.YAZ1sFKblN ++ rm /tmp/tmp.Y1wa5HfmC1 /tmp/tmp.YAZ1sFKblN ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.80X23jLaHi +++ mktemp ++ local LAST_ERR=/tmp/tmp.oDxbfAhUCG ++ local exit_status=0 ++ local timeout=4 +++ seq 
0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.80X23jLaHi ++ cat /tmp/tmp.oDxbfAhUCG ++ rm /tmp/tmp.80X23jLaHi /tmp/tmp.oDxbfAhUCG ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ false == \t\r\u\e ]] + wait_for_running some-name-rs1 3 + local name=some-name-rs1 + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs1 + local cluster_name=some-name ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod some-name-rs1-0 + local pod=some-name-rs1-0 + set +o xtrace waiting for pod/some-name-rs1-0 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod some-name-rs1-1 + local pod=some-name-rs1-1 + set +o xtrace waiting for pod/some-name-rs1-1 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs1")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.zL32pyPF7M +++ mktemp ++ local LAST_ERR=/tmp/tmp.psmc34gKvl ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs1")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.zL32pyPF7M ++ cat /tmp/tmp.psmc34gKvl ++ rm /tmp/tmp.zL32pyPF7M /tmp/tmp.psmc34gKvl ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-rs1-2 + local pod=some-name-rs1-2 + set +o xtrace waiting for pod/some-name-rs1-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs1")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.1CR7UMyDNL +++ mktemp ++ local LAST_ERR=/tmp/tmp.qwRzuVqu1r ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs1")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.1CR7UMyDNL ++ cat /tmp/tmp.qwRzuVqu1r ++ rm /tmp/tmp.1CR7UMyDNL /tmp/tmp.qwRzuVqu1r ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs1")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.OHKu0Q0yfc +++ mktemp ++ local LAST_ERR=/tmp/tmp.ueX6YIrMhZ ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs1")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.OHKu0Q0yfc ++ cat /tmp/tmp.ueX6YIrMhZ ++ rm /tmp/tmp.OHKu0Q0yfc /tmp/tmp.ueX6YIrMhZ ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness + wait_for_running some-name-rs2 3 + local name=some-name-rs2 + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs2 + local cluster_name=some-name ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod some-name-rs2-0 + local pod=some-name-rs2-0 + set +o xtrace waiting for pod/some-name-rs2-0 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod some-name-rs2-1 + local pod=some-name-rs2-1 + set +o xtrace waiting for pod/some-name-rs2-1 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 
'jsonpath={.spec.replsets[?(@.name=="rs2")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.zQfrswse66 +++ mktemp ++ local LAST_ERR=/tmp/tmp.D1OgDQ736A ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs2")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.zQfrswse66 ++ cat /tmp/tmp.D1OgDQ736A ++ rm /tmp/tmp.zQfrswse66 /tmp/tmp.D1OgDQ736A ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-rs2-2 + local pod=some-name-rs2-2 + set +o xtrace waiting for pod/some-name-rs2-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs2")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.HkKQ6k97aE +++ mktemp ++ local LAST_ERR=/tmp/tmp.MY1bCCDeto ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs2")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.HkKQ6k97aE ++ cat /tmp/tmp.MY1bCCDeto ++ rm /tmp/tmp.HkKQ6k97aE /tmp/tmp.MY1bCCDeto ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs2")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.qowli1B3yo +++ mktemp ++ local LAST_ERR=/tmp/tmp.pzaJIK36yU ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs2")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.qowli1B3yo ++ cat /tmp/tmp.pzaJIK36yU ++ rm /tmp/tmp.qowli1B3yo /tmp/tmp.pzaJIK36yU ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness + sleep 10 + write_initial_data + desc 'create user myApp' + set +o xtrace ----------------------------------------------------------------------------------- create user myApp ----------------------------------------------------------------------------------- + run_mongos 'db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})' userAdmin:userAdmin123456@some-name-mongos.pitr-physical-backup-source-16105 + local 'command=db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})' + local uri=userAdmin:userAdmin123456@some-name-mongos.pitr-physical-backup-source-16105 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local port=27017 + local mongo_bin=mongo ++ echo .svc.cluster.local ++ awk -F: '{print $2}' + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.bVjK0VIK5g +++ mktemp ++ local LAST_ERR=/tmp/tmp.YQdQO941go ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.bVjK0VIK5g ++ cat /tmp/tmp.YQdQO941go ++ rm /tmp/tmp.bVjK0VIK5g /tmp/tmp.YQdQO941go ++ return 0 + local client_container=psmdb-client-696897d69b-9xg4t + kubectl_bin exec psmdb-client-696897d69b-9xg4t -- bash -c 'printf '\''db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})\n'\'' | mongo 
mongodb://userAdmin:userAdmin123456@some-name-mongos.pitr-physical-backup-source-16105.svc.cluster.local:27017/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.hXtSJdUfz3 ++ mktemp + local LAST_ERR=/tmp/tmp.a2nQiHrvTq + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-9xg4t -- bash -c 'printf '\''db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})\n'\'' | mongo mongodb://userAdmin:userAdmin123456@some-name-mongos.pitr-physical-backup-source-16105.svc.cluster.local:27017/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.hXtSJdUfz3 Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-mongos.pitr-physical-backup-source-16105.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb Implicit session: session { "id" : UUID("14ddf398-627d-4346-ab2d-6ab8a6544712") } Percona Server for MongoDB server version: v8.0.16-5 WARNING: shell and server versions do not match Successfully added user: { "user" : "myApp", "roles" : [ { "db" : "myApp", "role" : "readWrite" } ] } bye + cat /tmp/tmp.a2nQiHrvTq + rm /tmp/tmp.hXtSJdUfz3 /tmp/tmp.a2nQiHrvTq + return 0 + sleep 2 + write_document + local cmp_postfix= + local sleep_value=0 + log 'write initial data, read from all' + set +o xtrace [2025-12-18T19:20:49+0000] write initial data, read from all + run_mongos 'use myApp\n db.test.insert({ x: 100500 })' myApp:myPass@some-name-mongos.pitr-physical-backup-source-16105 + local 'command=use myApp\n db.test.insert({ x: 100500 })' + local uri=myApp:myPass@some-name-mongos.pitr-physical-backup-source-16105 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local port=27017 + local mongo_bin=mongo ++ echo .svc.cluster.local ++ awk -F: '{print $2}' + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.pdo4iiwi2B +++ mktemp ++ local LAST_ERR=/tmp/tmp.p3AaneGq7j ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.pdo4iiwi2B ++ cat /tmp/tmp.p3AaneGq7j ++ rm /tmp/tmp.pdo4iiwi2B /tmp/tmp.p3AaneGq7j ++ return 0 + local client_container=psmdb-client-696897d69b-9xg4t + kubectl_bin exec psmdb-client-696897d69b-9xg4t -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.pitr-physical-backup-source-16105.svc.cluster.local:27017/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.9C9LuApONT ++ mktemp + local LAST_ERR=/tmp/tmp.HVcEoR3HcG + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-9xg4t -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.pitr-physical-backup-source-16105.svc.cluster.local:27017/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.9C9LuApONT Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-mongos.pitr-physical-backup-source-16105.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb Implicit session: session { "id" : UUID("636ba2a9-485f-4beb-9e5e-0b8d3c65a18a") } Percona Server for MongoDB 
server version: v8.0.16-5 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.HVcEoR3HcG + rm /tmp/tmp.9C9LuApONT /tmp/tmp.HVcEoR3HcG + return 0 + sleep 0 + compare_mongos_cmd find myApp:myPass@some-name-mongos.pitr-physical-backup-source-16105 + local command=find + local uri=myApp:myPass@some-name-mongos.pitr-physical-backup-source-16105 + local postfix= + local suffix= + local database=myApp + local collection=test + local port=27017 + local tls=false + [[ false == \t\r\u\e ]] + mongos_command=run_mongos + log 'running db.test.command() in myApp' + set +o xtrace [2025-12-18T19:20:52+0000] running db.test.command() in myApp + run_mongos 'use myApp\n db.test.find()' myApp:myPass@some-name-mongos.pitr-physical-backup-source-16105 mongodb '' '' 27017 + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-mongos.pitr-physical-backup-source-16105 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local port=27017 + local mongo_bin=mongo + egrep -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ echo .svc.cluster.local ++ awk -F: '{print $2}' egrep: warning: egrep is obsolescent; using grep -E + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ciV4FnPRVB +++ mktemp ++ local LAST_ERR=/tmp/tmp.BsLf9BoyNm ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.ciV4FnPRVB ++ cat /tmp/tmp.BsLf9BoyNm ++ rm /tmp/tmp.ciV4FnPRVB /tmp/tmp.BsLf9BoyNm ++ return 0 + local client_container=psmdb-client-696897d69b-9xg4t + kubectl_bin exec psmdb-client-696897d69b-9xg4t -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.pitr-physical-backup-source-16105.svc.cluster.local:27017/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.7IWM91pmvv ++ mktemp + local LAST_ERR=/tmp/tmp.H4YNoVIJZ1 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-9xg4t -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.pitr-physical-backup-source-16105.svc.cluster.local:27017/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.7IWM91pmvv + cat /tmp/tmp.H4YNoVIJZ1 + rm /tmp/tmp.7IWM91pmvv /tmp/tmp.H4YNoVIJZ1 + return 0 + [[ 0 -eq 0 ]] + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2152/e2e-tests/pitr-physical-backup-source/compare/find.json /tmp/tmp.iB4fDcwwkH/find + desc 'restore pitr type latest using backupSource' + set +o xtrace ----------------------------------------------------------------------------------- restore pitr type latest using backupSource ----------------------------------------------------------------------------------- + write_document -2nd + local cmp_postfix=-2nd + local sleep_value=0 + log 'write initial data, read from all' + set +o xtrace [2025-12-18T19:20:54+0000] write initial data, read from all + 
run_mongos 'use myApp\n db.test.insert({ x: 100500 })' myApp:myPass@some-name-mongos.pitr-physical-backup-source-16105 + local 'command=use myApp\n db.test.insert({ x: 100500 })' + local uri=myApp:myPass@some-name-mongos.pitr-physical-backup-source-16105 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local port=27017 + local mongo_bin=mongo ++ echo .svc.cluster.local ++ awk -F: '{print $2}' + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.GDCFAZl87p +++ mktemp ++ local LAST_ERR=/tmp/tmp.2IPFFvpUMF ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.GDCFAZl87p ++ cat /tmp/tmp.2IPFFvpUMF ++ rm /tmp/tmp.GDCFAZl87p /tmp/tmp.2IPFFvpUMF ++ return 0 + local client_container=psmdb-client-696897d69b-9xg4t + kubectl_bin exec psmdb-client-696897d69b-9xg4t -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.pitr-physical-backup-source-16105.svc.cluster.local:27017/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.LAO1yX28rX ++ mktemp + local LAST_ERR=/tmp/tmp.R8HFqr7tmH + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-9xg4t -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.pitr-physical-backup-source-16105.svc.cluster.local:27017/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.LAO1yX28rX Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-mongos.pitr-physical-backup-source-16105.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb Implicit session: session { "id" : UUID("f54db147-9f8a-4565-8e12-d1ac84d15e55") } Percona Server for MongoDB server version: v8.0.16-5 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.R8HFqr7tmH + rm /tmp/tmp.LAO1yX28rX /tmp/tmp.R8HFqr7tmH + return 0 + sleep 0 + compare_mongos_cmd find myApp:myPass@some-name-mongos.pitr-physical-backup-source-16105 -2nd + local command=find + local uri=myApp:myPass@some-name-mongos.pitr-physical-backup-source-16105 + local postfix=-2nd + local suffix= + local database=myApp + local collection=test + local port=27017 + local tls=false + [[ false == \t\r\u\e ]] + mongos_command=run_mongos + log 'running db.test.command() in myApp' + set +o xtrace [2025-12-18T19:20:57+0000] running db.test.command() in myApp + egrep -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' + run_mongos 'use myApp\n db.test.find()' myApp:myPass@some-name-mongos.pitr-physical-backup-source-16105 mongodb '' '' 27017 + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-mongos.pitr-physical-backup-source-16105 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local port=27017 + local mongo_bin=mongo egrep: warning: egrep is 
obsolescent; using grep -E ++ echo .svc.cluster.local ++ awk -F: '{print $2}' + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.wCZBAfSOJz +++ mktemp ++ local LAST_ERR=/tmp/tmp.gDZxYryZqh ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.wCZBAfSOJz ++ cat /tmp/tmp.gDZxYryZqh ++ rm /tmp/tmp.wCZBAfSOJz /tmp/tmp.gDZxYryZqh ++ return 0 + local client_container=psmdb-client-696897d69b-9xg4t + kubectl_bin exec psmdb-client-696897d69b-9xg4t -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.pitr-physical-backup-source-16105.svc.cluster.local:27017/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.ZoQSyY4x2j ++ mktemp + local LAST_ERR=/tmp/tmp.gqkQJXC4c8 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-9xg4t -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.pitr-physical-backup-source-16105.svc.cluster.local:27017/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.ZoQSyY4x2j + cat /tmp/tmp.gqkQJXC4c8 + rm /tmp/tmp.ZoQSyY4x2j /tmp/tmp.gqkQJXC4c8 + return 0 + [[ 0 -eq 0 ]] + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2152/e2e-tests/pitr-physical-backup-source/compare/find-2nd.json /tmp/tmp.iB4fDcwwkH/find-2nd + run_backup backup-minio 2 physical + local name=backup-minio + local idx=2 + local type=physical + desc 'run backup backup-minio-2' + set +o xtrace ----------------------------------------------------------------------------------- run backup backup-minio-2 ----------------------------------------------------------------------------------- + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2152/e2e-tests/pitr-physical-backup-source/conf/backup-minio.yml + /usr/sbin/sed -e 's/name:/name: backup-minio-2/' + /usr/sbin/sed -e 's/type:/type: physical/' + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.sx95joPcEr ++ mktemp + local LAST_ERR=/tmp/tmp.yfZJu46Ovb + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.sx95joPcEr perconaservermongodbbackup.psmdb.percona.com/backup-minio-2 created + cat /tmp/tmp.yfZJu46Ovb + rm /tmp/tmp.sx95joPcEr /tmp/tmp.yfZJu46Ovb + return 0 + wait_backup backup-minio-2 + local backup_name=backup-minio-2 + local target_state=ready + set +o xtrace waiting for backup-minio-2 to reach ready state.........OK + compare_latest_restorable_time some-name-rs0 backup-minio-2 + local cluster=some-name-rs0 + local backup_name=backup-minio-2 + local latest_restorable_time + local backup_time ++ get_latest_restorable_time some-name-rs0 ++ local cluster=some-name-rs0 ++ local first_timestamp ++ local second_timestamp ++ local retry=0 ++ [[ '' != '' ]] +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.o35UpzmyNE ++++ mktemp +++ local LAST_ERR=/tmp/tmp.fvR2HxZS6i +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec 
some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.o35UpzmyNE +++ cat /tmp/tmp.fvR2HxZS6i +++ rm /tmp/tmp.o35UpzmyNE /tmp/tmp.fvR2HxZS6i +++ return 0 ++ first_timestamp=1766084494 ++ sleep 5 ++ [[ 1766084494 != '' ]] ++ [[ 1766084494 != \n\u\l\l ]] +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.GIEXkD7Vo4 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.bIFApxu3bz +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.GIEXkD7Vo4 +++ cat /tmp/tmp.bIFApxu3bz +++ rm /tmp/tmp.GIEXkD7Vo4 /tmp/tmp.bIFApxu3bz +++ return 0 ++ second_timestamp=1766084494 ++ let retry+=1 ++ [[ 1 -gt 30 ]] ++ [[ 1766084494 != '' ]] ++ [[ 1766084494 != \n\u\l\l ]] ++ [[ 1766084494 == 1766084494 ]] ++ /usr/sbin/date -u -d @1766084494 +%Y-%m-%dT%H:%M:%SZ + latest_restorable_time=2025-12-18T19:01:34Z ++ get_latest_restorable_time_from_backup_object backup-minio-2 ++ local backup_name=backup-minio-2 ++ local latestRestorableTime ++ local retry=0 ++ [[ '' != '' ]] ++ sleep 5 +++ kubectl_bin get psmdb-backup backup-minio-2 -o 'jsonpath={.status.latestRestorableTime}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.i6QlcnpoGY ++++ mktemp +++ local LAST_ERR=/tmp/tmp.Tzrld8ymAe +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get psmdb-backup backup-minio-2 -o 'jsonpath={.status.latestRestorableTime}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.i6QlcnpoGY +++ cat /tmp/tmp.Tzrld8ymAe +++ rm /tmp/tmp.i6QlcnpoGY /tmp/tmp.Tzrld8ymAe +++ return 0 ++ latestRestorableTime=2025-12-18T19:01:34Z ++ let retry+=1 ++ [[ 1 -gt 30 ]] ++ [[ 2025-12-18T19:01:34Z != '' ]] ++ [[ 2025-12-18T19:01:34Z != \n\u\l\l ]] ++ echo 2025-12-18T19:01:34Z + backup_time=2025-12-18T19:01:34Z + [[ 2025-12-18T19:01:34Z != \2\0\2\5\-\1\2\-\1\8\T\1\9\:\0\1\:\3\4\Z ]] + check_recovery backup-minio-2 latest '' -3rd some-name backupSource + local backup_name=backup-minio-2 + local restore_type=latest + local restore_date= + local cmp_postfix=-3rd + local cluster_name=some-name + local backupSource=backupSource ++ get_latest_oplog_chunk_ts some-name ++ local cluster=some-name +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.ieejVm5WCn ++++ mktemp +++ local LAST_ERR=/tmp/tmp.3xkSiE0hVW +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.ieejVm5WCn +++ cat /tmp/tmp.3xkSiE0hVW +++ rm /tmp/tmp.ieejVm5WCn /tmp/tmp.3xkSiE0hVW +++ return 0 ++ echo 1766084494 + local latest_ts=1766084494 + desc 'write more data before restore by latest' + set +o xtrace ----------------------------------------------------------------------------------- write more data before restore by latest ----------------------------------------------------------------------------------- + run_mongos 'use myApp\n db.test.insert({ x: 100501 })' myApp:myPass@some-name-mongos.pitr-physical-backup-source-16105 + 
local 'command=use myApp\n db.test.insert({ x: 100501 })' + local uri=myApp:myPass@some-name-mongos.pitr-physical-backup-source-16105 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local port=27017 + local mongo_bin=mongo ++ echo .svc.cluster.local ++ awk -F: '{print $2}' + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.8vFRwfyZmV +++ mktemp ++ local LAST_ERR=/tmp/tmp.5Wk0LcFlPL ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.8vFRwfyZmV ++ cat /tmp/tmp.5Wk0LcFlPL ++ rm /tmp/tmp.8vFRwfyZmV /tmp/tmp.5Wk0LcFlPL ++ return 0 + local client_container=psmdb-client-696897d69b-9xg4t + kubectl_bin exec psmdb-client-696897d69b-9xg4t -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100501 })\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.pitr-physical-backup-source-16105.svc.cluster.local:27017/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.5z4UNwmdZ7 ++ mktemp + local LAST_ERR=/tmp/tmp.RMIqEe06A8 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-9xg4t -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100501 })\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.pitr-physical-backup-source-16105.svc.cluster.local:27017/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.5z4UNwmdZ7 Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-mongos.pitr-physical-backup-source-16105.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb Implicit session: session { "id" : UUID("566dd1f4-9a14-4861-9ef6-3561c3f4b861") } Percona Server for MongoDB server version: v8.0.16-5 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.RMIqEe06A8 + rm /tmp/tmp.5z4UNwmdZ7 /tmp/tmp.RMIqEe06A8 + return 0 + [[ -n '' ]] + desc 'Restoring to latest' + set +o xtrace ----------------------------------------------------------------------------------- Restoring to latest ----------------------------------------------------------------------------------- ++ get_latest_oplog_chunk_ts some-name ++ local cluster=some-name +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.SVV2HIZkqo ++++ mktemp +++ local LAST_ERR=/tmp/tmp.BJ8ON8nmB1 +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.SVV2HIZkqo +++ cat /tmp/tmp.BJ8ON8nmB1 +++ rm /tmp/tmp.SVV2HIZkqo /tmp/tmp.BJ8ON8nmB1 +++ return 0 ++ echo 1766084494 + local current_ts=1766084494 + retries=0 + [[ 1766084494 -gt 1766084494 ]] + [[ 0 -gt 30 ]] ++ get_latest_oplog_chunk_ts some-name ++ local cluster=some-name +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.9FlfKydWVB ++++ mktemp +++ local LAST_ERR=/tmp/tmp.IMojfwXj46 +++ local exit_status=0 
+++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.9FlfKydWVB +++ cat /tmp/tmp.IMojfwXj46 +++ rm /tmp/tmp.9FlfKydWVB /tmp/tmp.IMojfwXj46 +++ return 0 ++ echo 1766084494 + latest_ts=1766084494 + retries=1 ++ format_date 1766084494 ++ local timestamp=1766084494 +++ TZ=UTC +++ /usr/sbin/date -d@1766084494 '+%Y-%m-%d %H:%M:%S' ++ echo 2025-12-18 19:01:34 ++ format_date 1766084494 ++ local timestamp=1766084494 +++ TZ=UTC +++ /usr/sbin/date -d@1766084494 '+%Y-%m-%d %H:%M:%S' ++ echo 2025-12-18 19:01:34 + echo 'Waiting for last oplog chunk (2025-12-18 19:01:34) to be 120 seconds older than starting chunk (2025-12-18 19:01:34)' Waiting for last oplog chunk (2025-12-18 19:01:34) to be 120 seconds older than starting chunk (2025-12-18 19:01:34) + sleep 10 + [[ 1766084494 -gt 1766084494 ]] + [[ 1 -gt 30 ]] ++ get_latest_oplog_chunk_ts some-name ++ local cluster=some-name +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.Thmcnbod54 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.JXBcUv3RzM +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.Thmcnbod54 +++ cat /tmp/tmp.JXBcUv3RzM +++ rm /tmp/tmp.Thmcnbod54 /tmp/tmp.JXBcUv3RzM +++ return 0 ++ echo 1766084494 + latest_ts=1766084494 + retries=2 ++ format_date 1766084494 ++ local timestamp=1766084494 +++ TZ=UTC +++ /usr/sbin/date -d@1766084494 '+%Y-%m-%d %H:%M:%S' ++ echo 2025-12-18 19:01:34 ++ format_date 1766084494 ++ local timestamp=1766084494 +++ TZ=UTC +++ /usr/sbin/date -d@1766084494 '+%Y-%m-%d %H:%M:%S' ++ echo 2025-12-18 19:01:34 + echo 'Waiting for last oplog chunk (2025-12-18 19:01:34) to be 120 seconds older than starting chunk (2025-12-18 19:01:34)' Waiting for last oplog chunk (2025-12-18 19:01:34) to be 120 seconds older than starting chunk (2025-12-18 19:01:34) + sleep 10 + [[ 1766084494 -gt 1766084494 ]] + [[ 2 -gt 30 ]] ++ get_latest_oplog_chunk_ts some-name ++ local cluster=some-name +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.YOj6CG8988 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.chrGgRo4R4 +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.YOj6CG8988 +++ cat /tmp/tmp.chrGgRo4R4 +++ rm /tmp/tmp.YOj6CG8988 /tmp/tmp.chrGgRo4R4 +++ return 0 ++ echo 1766084494 + latest_ts=1766084494 + retries=3 ++ format_date 1766084494 ++ local timestamp=1766084494 +++ TZ=UTC +++ /usr/sbin/date -d@1766084494 '+%Y-%m-%d %H:%M:%S' ++ echo 2025-12-18 19:01:34 ++ format_date 1766084494 ++ local timestamp=1766084494 +++ TZ=UTC +++ /usr/sbin/date -d@1766084494 '+%Y-%m-%d %H:%M:%S' ++ echo 2025-12-18 19:01:34 + echo 'Waiting for last oplog chunk (2025-12-18 19:01:34) to be 120 seconds older than starting chunk (2025-12-18 19:01:34)' Waiting for last oplog chunk (2025-12-18 19:01:34) to be 120 seconds older than starting chunk (2025-12-18 19:01:34) + sleep 10 + [[ 
1766084494 -gt 1766084494 ]] + [[ 3 -gt 30 ]] ++ get_latest_oplog_chunk_ts some-name ++ local cluster=some-name +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json ++++ mktemp +++ local LAST_OUT=/tmp/tmp.kp0SOs17R2 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.pPrH14i9Ii +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.kp0SOs17R2 +++ cat /tmp/tmp.pPrH14i9Ii +++ rm /tmp/tmp.kp0SOs17R2 /tmp/tmp.pPrH14i9Ii +++ return 0 ++ echo 1766084494 + latest_ts=1766084494 + retries=4 ++ format_date 1766084494 ++ local timestamp=1766084494 +++ TZ=UTC +++ /usr/sbin/date -d@1766084494 '+%Y-%m-%d %H:%M:%S' ++ echo 2025-12-18 19:01:34 ++ format_date 1766084494 ++ local timestamp=1766084494 +++ TZ=UTC +++ /usr/sbin/date -d@1766084494 '+%Y-%m-%d %H:%M:%S' ++ echo 2025-12-18 19:01:34 + echo 'Waiting for last oplog chunk (2025-12-18 19:01:34) to be 120 seconds older than starting chunk (2025-12-18 19:01:34)' Waiting for last oplog chunk (2025-12-18 19:01:34) to be 120 seconds older than starting chunk (2025-12-18 19:01:34) + sleep 10 + [[ 1766084494 -gt 1766084494 ]] + [[ 4 -gt 30 ]] ++ get_latest_oplog_chunk_ts some-name ++ local cluster=some-name +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.L4JhWTGCQf ++++ mktemp +++ local LAST_ERR=/tmp/tmp.tM8cXHMDa2 +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.L4JhWTGCQf +++ cat /tmp/tmp.tM8cXHMDa2 +++ rm /tmp/tmp.L4JhWTGCQf /tmp/tmp.tM8cXHMDa2 +++ return 0 ++ echo 1766084494 + latest_ts=1766084494 + retries=5 ++ format_date 1766084494 ++ local timestamp=1766084494 +++ TZ=UTC +++ /usr/sbin/date -d@1766084494 '+%Y-%m-%d %H:%M:%S' ++ echo 2025-12-18 19:01:34 ++ format_date 1766084494 ++ local timestamp=1766084494 +++ TZ=UTC +++ /usr/sbin/date -d@1766084494 '+%Y-%m-%d %H:%M:%S' ++ echo 2025-12-18 19:01:34 + echo 'Waiting for last oplog chunk (2025-12-18 19:01:34) to be 120 seconds older than starting chunk (2025-12-18 19:01:34)' Waiting for last oplog chunk (2025-12-18 19:01:34) to be 120 seconds older than starting chunk (2025-12-18 19:01:34) + sleep 10 + [[ 1766084494 -gt 1766084494 ]] + [[ 5 -gt 30 ]] ++ get_latest_oplog_chunk_ts some-name ++ local cluster=some-name +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.RPwGfThgbB ++++ mktemp +++ local LAST_ERR=/tmp/tmp.VmP00XVgiU +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.RPwGfThgbB +++ cat /tmp/tmp.VmP00XVgiU +++ rm /tmp/tmp.RPwGfThgbB /tmp/tmp.VmP00XVgiU +++ return 0 ++ echo 1766084494 + latest_ts=1766084494 + retries=6 ++ format_date 1766084494 ++ local timestamp=1766084494 +++ TZ=UTC +++ /usr/sbin/date -d@1766084494 '+%Y-%m-%d %H:%M:%S' ++ echo 2025-12-18 19:01:34 ++ format_date 1766084494 ++ local 
timestamp=1766084494 +++ TZ=UTC +++ /usr/sbin/date -d@1766084494 '+%Y-%m-%d %H:%M:%S' ++ echo 2025-12-18 19:01:34 + echo 'Waiting for last oplog chunk (2025-12-18 19:01:34) to be 120 seconds older than starting chunk (2025-12-18 19:01:34)' Waiting for last oplog chunk (2025-12-18 19:01:34) to be 120 seconds older than starting chunk (2025-12-18 19:01:34) + sleep 10 + [[ 1766084494 -gt 1766084494 ]] + [[ 6 -gt 30 ]] ++ get_latest_oplog_chunk_ts some-name ++ local cluster=some-name +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.R0E4UopA0v ++++ mktemp +++ local LAST_ERR=/tmp/tmp.SVoT0gygEl +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.R0E4UopA0v +++ cat /tmp/tmp.SVoT0gygEl +++ rm /tmp/tmp.R0E4UopA0v /tmp/tmp.SVoT0gygEl +++ return 0 ++ echo 1766084494 + latest_ts=1766084494 + retries=7 ++ format_date 1766084494 ++ local timestamp=1766084494 +++ TZ=UTC +++ /usr/sbin/date -d@1766084494 '+%Y-%m-%d %H:%M:%S' ++ echo 2025-12-18 19:01:34 ++ format_date 1766084494 ++ local timestamp=1766084494 +++ TZ=UTC +++ /usr/sbin/date -d@1766084494 '+%Y-%m-%d %H:%M:%S' ++ echo 2025-12-18 19:01:34 + echo 'Waiting for last oplog chunk (2025-12-18 19:01:34) to be 120 seconds older than starting chunk (2025-12-18 19:01:34)' Waiting for last oplog chunk (2025-12-18 19:01:34) to be 120 seconds older than starting chunk (2025-12-18 19:01:34) + sleep 10 + [[ 1766084494 -gt 1766084494 ]] + [[ 7 -gt 30 ]] ++ get_latest_oplog_chunk_ts some-name ++ local cluster=some-name +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.3bQMpCV4ar ++++ mktemp +++ local LAST_ERR=/tmp/tmp.SplcQsnKFe +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.3bQMpCV4ar +++ cat /tmp/tmp.SplcQsnKFe +++ rm /tmp/tmp.3bQMpCV4ar /tmp/tmp.SplcQsnKFe +++ return 0 ++ echo 1766084494 + latest_ts=1766084494 + retries=8 ++ format_date 1766084494 ++ local timestamp=1766084494 +++ TZ=UTC +++ /usr/sbin/date -d@1766084494 '+%Y-%m-%d %H:%M:%S' ++ echo 2025-12-18 19:01:34 ++ format_date 1766084494 ++ local timestamp=1766084494 +++ TZ=UTC +++ /usr/sbin/date -d@1766084494 '+%Y-%m-%d %H:%M:%S' ++ echo 2025-12-18 19:01:34 + echo 'Waiting for last oplog chunk (2025-12-18 19:01:34) to be 120 seconds older than starting chunk (2025-12-18 19:01:34)' Waiting for last oplog chunk (2025-12-18 19:01:34) to be 120 seconds older than starting chunk (2025-12-18 19:01:34) + sleep 10 + [[ 1766084494 -gt 1766084494 ]] + [[ 8 -gt 30 ]] ++ get_latest_oplog_chunk_ts some-name ++ local cluster=some-name +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.Gm6OcpjApS ++++ mktemp +++ local LAST_ERR=/tmp/tmp.zwQbgqSDkv +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 
']' +++ break +++ cat /tmp/tmp.Gm6OcpjApS +++ cat /tmp/tmp.zwQbgqSDkv +++ rm /tmp/tmp.Gm6OcpjApS /tmp/tmp.zwQbgqSDkv +++ return 0 ++ echo 1766084494 + latest_ts=1766084494 + retries=9 ++ format_date 1766084494 ++ local timestamp=1766084494 +++ TZ=UTC +++ /usr/sbin/date -d@1766084494 '+%Y-%m-%d %H:%M:%S' ++ echo 2025-12-18 19:01:34 ++ format_date 1766084494 ++ local timestamp=1766084494 +++ TZ=UTC +++ /usr/sbin/date -d@1766084494 '+%Y-%m-%d %H:%M:%S' ++ echo 2025-12-18 19:01:34 + echo 'Waiting for last oplog chunk (2025-12-18 19:01:34) to be 120 seconds older than starting chunk (2025-12-18 19:01:34)' Waiting for last oplog chunk (2025-12-18 19:01:34) to be 120 seconds older than starting chunk (2025-12-18 19:01:34) + sleep 10 + [[ 1766084494 -gt 1766084494 ]] + [[ 9 -gt 30 ]] ++ get_latest_oplog_chunk_ts some-name ++ local cluster=some-name +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.r6y2xd5wcJ ++++ mktemp +++ local LAST_ERR=/tmp/tmp.ys4Z9NOoOJ +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.r6y2xd5wcJ +++ cat /tmp/tmp.ys4Z9NOoOJ +++ rm /tmp/tmp.r6y2xd5wcJ /tmp/tmp.ys4Z9NOoOJ +++ return 0 ++ echo 1766084494 + latest_ts=1766084494 + retries=10 ++ format_date 1766084494 ++ local timestamp=1766084494 +++ TZ=UTC +++ /usr/sbin/date -d@1766084494 '+%Y-%m-%d %H:%M:%S' ++ echo 2025-12-18 19:01:34 ++ format_date 1766084494 ++ local timestamp=1766084494 +++ TZ=UTC +++ /usr/sbin/date -d@1766084494 '+%Y-%m-%d %H:%M:%S' ++ echo 2025-12-18 19:01:34 + echo 'Waiting for last oplog chunk (2025-12-18 19:01:34) to be 120 seconds older than starting chunk (2025-12-18 19:01:34)' Waiting for last oplog chunk (2025-12-18 19:01:34) to be 120 seconds older than starting chunk (2025-12-18 19:01:34) + sleep 10 + [[ 1766084494 -gt 1766084494 ]] + [[ 10 -gt 30 ]] ++ get_latest_oplog_chunk_ts some-name ++ local cluster=some-name +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.chIB6g3b1l ++++ mktemp +++ local LAST_ERR=/tmp/tmp.yFTSGezmbX +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.chIB6g3b1l +++ cat /tmp/tmp.yFTSGezmbX +++ rm /tmp/tmp.chIB6g3b1l /tmp/tmp.yFTSGezmbX +++ return 0 ++ echo 1766085812 + latest_ts=1766085812 + retries=11 ++ format_date 1766085812 ++ local timestamp=1766085812 +++ TZ=UTC +++ /usr/sbin/date -d@1766085812 '+%Y-%m-%d %H:%M:%S' ++ echo 2025-12-18 19:23:32 ++ format_date 1766084494 ++ local timestamp=1766084494 +++ TZ=UTC +++ /usr/sbin/date -d@1766084494 '+%Y-%m-%d %H:%M:%S' ++ echo 2025-12-18 19:01:34 + echo 'Waiting for last oplog chunk (2025-12-18 19:23:32) to be 120 seconds older than starting chunk (2025-12-18 19:01:34)' Waiting for last oplog chunk (2025-12-18 19:23:32) to be 120 seconds older than starting chunk (2025-12-18 19:01:34) + sleep 10 + [[ 1766085812 -gt 1766084494 ]] + '[' -z backupSource ']' + desc 'check restore by latest backupSource' + set +o xtrace 
----------------------------------------------------------------------------------- check restore by latest backupSource ----------------------------------------------------------------------------------- ++ get_backup_dest backup-minio-2 ++ local backup_name=backup-minio-2 ++ kubectl_bin get psmdb-backup backup-minio-2 -o 'jsonpath={.status.destination}' ++ sed -e 's/.json$//' +++ mktemp ++ local LAST_OUT=/tmp/tmp.DACPr1zCgZ +++ mktemp ++ local LAST_ERR=/tmp/tmp.K5i8OZ4woS ++ local exit_status=0 ++ local timeout=4 ++ sed 's|gs://||' ++ sed 's|s3://||' ++ sed 's|azure://||' +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb-backup backup-minio-2 -o 'jsonpath={.status.destination}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.DACPr1zCgZ ++ cat /tmp/tmp.K5i8OZ4woS ++ rm /tmp/tmp.DACPr1zCgZ /tmp/tmp.K5i8OZ4woS ++ return 0 + backup_dest=operator-testing/2025-12-18T19:21:03Z + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2152/e2e-tests/pitr-physical-backup-source/conf/restore.yml + /usr/sbin/sed -e 's/name:/name: restore-backup-minio-2/' + /usr/sbin/sed -e /backupName/d + /usr/sbin/sed -e 's/pitrType:/type: latest/' + '[' -z '' ']' + /usr/sbin/sed -e /date:/d + /usr/sbin/sed -e 's|DESTINATION|operator-testing/2025-12-18T19:21:03Z|' + '[' -n '' ']' + yq + kubectl_bin apply -f - ++ get_bucket_name backup-minio-2 ++ local backup_name=backup-minio-2 ++ kubectl_bin get psmdb-backup backup-minio-2 -o 'jsonpath={.status.s3.bucket}' +++ mktemp ++ mktemp ++ local LAST_OUT=/tmp/tmp.JSmMkvO2z1 +++ mktemp + local LAST_OUT=/tmp/tmp.wb8qngrtZb ++ local LAST_ERR=/tmp/tmp.GrkFC8Jl33 ++ local exit_status=0 ++ local timeout=4 ++ mktemp +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb-backup backup-minio-2 -o 'jsonpath={.status.s3.bucket}' + local LAST_ERR=/tmp/tmp.gKfdgHjRWt + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.JSmMkvO2z1 ++ cat /tmp/tmp.GrkFC8Jl33 ++ rm /tmp/tmp.JSmMkvO2z1 /tmp/tmp.GrkFC8Jl33 ++ return 0 + /usr/sbin/sed -e 's|BUCKET-NAME|operator-testing|' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.wb8qngrtZb perconaservermongodbrestore.psmdb.percona.com/restore-backup-minio-2 created + cat /tmp/tmp.gKfdgHjRWt + rm /tmp/tmp.wb8qngrtZb /tmp/tmp.gKfdgHjRWt + return 0 + wait_restore backup-minio-2 some-name requested 0 1200 + local backup_name=backup-minio-2 + local cluster_name=some-name + local target_state=requested + local wait_cluster_consistency=0 + local wait_time=1200 + local ok_if_ready=0 + set +o xtrace Waiting for the psmdb-restore/restore-backup-minio-2 object to be created.OK Waiting psmdb-restore/restore-backup-minio-2 to reach state "requested" .........OK after 8 minutes + [[ 0 -eq 1 ]] + echo + wait_restore backup-minio-2 some-name ready 0 1600 + local backup_name=backup-minio-2 + local cluster_name=some-name + local target_state=ready + local wait_cluster_consistency=0 + local wait_time=1600 + local ok_if_ready=0 + set +o xtrace Waiting for the psmdb-restore/restore-backup-minio-2 object to be created.OK Waiting psmdb-restore/restore-backup-minio-2 to reach state "ready" ...OK after 2 minutes + [[ 0 -eq 1 ]] + echo + set -o xtrace + wait_for_running some-name-rs0 3 + local name=some-name-rs0 + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=some-name ++ seq 0 2 + 
for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod some-name-rs0-0 + local pod=some-name-rs0-0 + set +o xtrace waiting for pod/some-name-rs0-0 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod some-name-rs0-1 + local pod=some-name-rs0-1 + set +o xtrace waiting for pod/some-name-rs0-1 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.EL0dnwIr9Q +++ mktemp ++ local LAST_ERR=/tmp/tmp.Erphf7fXPi ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.EL0dnwIr9Q ++ cat /tmp/tmp.Erphf7fXPi ++ rm /tmp/tmp.EL0dnwIr9Q /tmp/tmp.Erphf7fXPi ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-rs0-2 + local pod=some-name-rs0-2 + set +o xtrace waiting for pod/some-name-rs0-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.2bmoDy2u6F +++ mktemp ++ local LAST_ERR=/tmp/tmp.Tmxj63gT1P ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.2bmoDy2u6F ++ cat /tmp/tmp.Tmxj63gT1P ++ rm /tmp/tmp.2bmoDy2u6F /tmp/tmp.Tmxj63gT1P ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.AHBT8aVP0f +++ mktemp ++ local LAST_ERR=/tmp/tmp.lr4IyPgWRf ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.AHBT8aVP0f ++ cat /tmp/tmp.lr4IyPgWRf ++ rm /tmp/tmp.AHBT8aVP0f /tmp/tmp.lr4IyPgWRf ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness........................................................... 
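The restore traced above is driven entirely by shell templating: check_recovery strips the backupName/date fields from the repo's restore template, switches the PITR type to latest, and points backupSource at the destination recorded on the backup object before waiting for the restore to reach "requested" and then "ready". A condensed sketch of that flow, assuming the same template path (relative to the repo checkout rather than the Jenkins workspace) and the bucket substitution seen in this run; names and values are specific to this job:

# Destination comes from the backup object's status, with the scheme prefix and
# trailing .json stripped, as the harness does above.
dest=$(kubectl get psmdb-backup backup-minio-2 -o jsonpath='{.status.destination}' \
  | sed -e 's|s3://||' -e 's|gs://||' -e 's|azure://||' -e 's/\.json$//')
# Render the template into a PerconaServerMongoDBRestore with type: latest and a
# backupSource destination, then apply it.
sed -e 's/name:/name: restore-backup-minio-2/' \
    -e '/backupName/d' \
    -e 's/pitrType:/type: latest/' \
    -e '/date:/d' \
    -e "s|DESTINATION|${dest}|" \
    -e 's|BUCKET-NAME|operator-testing|' \
    e2e-tests/pitr-physical-backup-source/conf/restore.yml | kubectl apply -f -
# The harness polls the restore state in its own loop (wait_restore); an
# approximately equivalent single command on kubectl >= 1.23 would be:
kubectl wait psmdb-restore/restore-backup-minio-2 \
  --for=jsonpath='{.status.state}'=ready --timeout=1600s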
+ wait_for_running some-name-cfg 3 + local name=some-name-cfg + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=cfg + local cluster_name=some-name ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod some-name-cfg-0 + local pod=some-name-cfg-0 + set +o xtrace waiting for pod/some-name-cfg-0 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod some-name-cfg-1 + local pod=some-name-cfg-1 + set +o xtrace waiting for pod/some-name-cfg-1 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Ea5NMA0XNu +++ mktemp ++ local LAST_ERR=/tmp/tmp.c7PQoooRub ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Ea5NMA0XNu ++ cat /tmp/tmp.c7PQoooRub ++ rm /tmp/tmp.Ea5NMA0XNu /tmp/tmp.c7PQoooRub ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-cfg-2 + local pod=some-name-cfg-2 + set +o xtrace waiting for pod/some-name-cfg-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.RK87e33XBm +++ mktemp ++ local LAST_ERR=/tmp/tmp.S3CgM8J7qx ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.RK87e33XBm ++ cat /tmp/tmp.S3CgM8J7qx ++ rm /tmp/tmp.RK87e33XBm /tmp/tmp.S3CgM8J7qx ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.SXAs2DPOwT +++ mktemp ++ local LAST_ERR=/tmp/tmp.snsGeDjrlI ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.SXAs2DPOwT ++ cat /tmp/tmp.snsGeDjrlI ++ rm /tmp/tmp.SXAs2DPOwT /tmp/tmp.snsGeDjrlI ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness + wait_for_running some-name-mongos 3 + local name=some-name-mongos + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=mongos + local cluster_name=some-name ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod some-name-mongos-0 + local pod=some-name-mongos-0 + set +o xtrace waiting for pod/some-name-mongos-0 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod some-name-mongos-1 + local pod=some-name-mongos-1 + set +o xtrace waiting for pod/some-name-mongos-1 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.vr84eDZuCA +++ mktemp ++ local LAST_ERR=/tmp/tmp.LVgw78XsqT ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].arbiter.enabled}' ++ 
exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.vr84eDZuCA ++ cat /tmp/tmp.LVgw78XsqT ++ rm /tmp/tmp.vr84eDZuCA /tmp/tmp.LVgw78XsqT ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-mongos-2 + local pod=some-name-mongos-2 + set +o xtrace waiting for pod/some-name-mongos-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.iPHuIRErzA +++ mktemp ++ local LAST_ERR=/tmp/tmp.farOeEvCES ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.iPHuIRErzA ++ cat /tmp/tmp.farOeEvCES ++ rm /tmp/tmp.iPHuIRErzA /tmp/tmp.farOeEvCES ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Omn0LjSmAn +++ mktemp ++ local LAST_ERR=/tmp/tmp.4YZoU3uoww ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Omn0LjSmAn ++ cat /tmp/tmp.4YZoU3uoww ++ rm /tmp/tmp.Omn0LjSmAn /tmp/tmp.4YZoU3uoww ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness + sleep 10 + compare_mongos_cmd find myApp:myPass@some-name-mongos.pitr-physical-backup-source-16105 -3rd + local command=find + local uri=myApp:myPass@some-name-mongos.pitr-physical-backup-source-16105 + local postfix=-3rd + local suffix= + local database=myApp + local collection=test + local port=27017 + local tls=false + [[ false == \t\r\u\e ]] + mongos_command=run_mongos + log 'running db.test.command() in myApp' + set +o xtrace [2025-12-18T19:38:14+0000] running db.test.command() in myApp + run_mongos 'use myApp\n db.test.find()' myApp:myPass@some-name-mongos.pitr-physical-backup-source-16105 mongodb '' '' 27017 + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-mongos.pitr-physical-backup-source-16105 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local port=27017 + local mongo_bin=mongo + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' + egrep -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' ++ echo .svc.cluster.local ++ awk -F: '{print $2}' egrep: warning: egrep is obsolescent; using grep -E + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.oPF5KeMWcl +++ mktemp ++ local LAST_ERR=/tmp/tmp.nBDG5NpnQF ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.oPF5KeMWcl ++ cat /tmp/tmp.nBDG5NpnQF ++ rm /tmp/tmp.oPF5KeMWcl /tmp/tmp.nBDG5NpnQF ++ return 0 + local client_container=psmdb-client-696897d69b-9xg4t + kubectl_bin exec 
psmdb-client-696897d69b-9xg4t -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.pitr-physical-backup-source-16105.svc.cluster.local:27017/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.QqWbHMNuXq ++ mktemp + local LAST_ERR=/tmp/tmp.Xzp8i7FdcL + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-9xg4t -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.pitr-physical-backup-source-16105.svc.cluster.local:27017/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.QqWbHMNuXq + cat /tmp/tmp.Xzp8i7FdcL + rm /tmp/tmp.QqWbHMNuXq /tmp/tmp.Xzp8i7FdcL + return 0 + [[ 0 -eq 0 ]] + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2152/e2e-tests/pitr-physical-backup-source/compare/find-3rd.json /tmp/tmp.iB4fDcwwkH/find-3rd + desc 'disable pitr' + set +o xtrace ----------------------------------------------------------------------------------- disable pitr ----------------------------------------------------------------------------------- + kubectl patch psmdb some-name --type=merge --patch '{"spec": {"backup": {"pitr": {"enabled": false}}}}' perconaservermongodb.psmdb.percona.com/some-name patched + sleep 20 + desc 'delete all backups' + set +o xtrace ----------------------------------------------------------------------------------- delete all backups ----------------------------------------------------------------------------------- + kubectl_bin delete psmdb-backup --all ++ mktemp + local LAST_OUT=/tmp/tmp.BEOgcgoeYE ++ mktemp + local LAST_ERR=/tmp/tmp.P4sEuyQm9B + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete psmdb-backup --all + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.BEOgcgoeYE perconaservermongodbbackup.psmdb.percona.com "backup-minio-1" deleted from pitr-physical-backup-source-16105 namespace perconaservermongodbbackup.psmdb.percona.com "backup-minio-2" deleted from pitr-physical-backup-source-16105 namespace + cat /tmp/tmp.P4sEuyQm9B + rm /tmp/tmp.BEOgcgoeYE /tmp/tmp.P4sEuyQm9B + return 0 + desc 'destroy cluster' + set +o xtrace ----------------------------------------------------------------------------------- destroy cluster ----------------------------------------------------------------------------------- + destroy pitr-physical-backup-source-16105 + local namespace=pitr-physical-backup-source-16105 + local ignore_logs=true + [[ 0 == 1 ]] + desc 'destroy cluster/operator and all other resources' + set +o xtrace ----------------------------------------------------------------------------------- destroy cluster/operator and all other resources ----------------------------------------------------------------------------------- + '[' true == false ']' + delete_backups + desc 'Delete psmdb-backup' + set +o xtrace ----------------------------------------------------------------------------------- Delete psmdb-backup ----------------------------------------------------------------------------------- ++ kubectl_bin get psmdb-backup --no-headers +++ mktemp ++ wc -l ++ local LAST_OUT=/tmp/tmp.6YD03xJ5AT +++ mktemp ++ local LAST_ERR=/tmp/tmp.L1xphyrAhX ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb-backup --no-headers ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.6YD03xJ5AT ++ cat /tmp/tmp.L1xphyrAhX No resources found in 
pitr-physical-backup-source-16105 namespace. ++ rm /tmp/tmp.6YD03xJ5AT /tmp/tmp.L1xphyrAhX ++ return 0 + '[' 0 '!=' 0 ']' + delete_crd + desc 'get and delete old CRDs and RBAC' + set +o xtrace ----------------------------------------------------------------------------------- get and delete old CRDs and RBAC ----------------------------------------------------------------------------------- + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2152/deploy/crd.yaml --ignore-not-found --wait=false ++ mktemp + local LAST_OUT=/tmp/tmp.lfUz6uYYyw ++ mktemp + local LAST_ERR=/tmp/tmp.GfX8y0PI08 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2152/deploy/crd.yaml --ignore-not-found --wait=false + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.lfUz6uYYyw customresourcedefinition.apiextensions.k8s.io "perconaservermongodbbackups.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbrestores.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbs.psmdb.percona.com" deleted + cat /tmp/tmp.GfX8y0PI08 + rm /tmp/tmp.lfUz6uYYyw /tmp/tmp.GfX8y0PI08 + return 0 ++ yq eval .metadata.name /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2152/deploy/crd.yaml ++ grep -v '\-\-\-' grep: warning: stray \ before - grep: warning: stray \ before - + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + kubectl get perconaservermongodbbackups.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbbackups.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbbackups" + kubectl patch perconaservermongodbbackups.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbbackups" + : + kubectl_bin wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.HyYBU1dLeV ++ mktemp + local LAST_ERR=/tmp/tmp.IW6HKqUNV7 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.HyYBU1dLeV + cat /tmp/tmp.IW6HKqUNV7 + rm /tmp/tmp.HyYBU1dLeV /tmp/tmp.IW6HKqUNV7 + return 0 + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + kubectl get perconaservermongodbrestores.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbrestores.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbrestores" + kubectl patch perconaservermongodbrestores.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbrestores" + : + kubectl_bin wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.IPXnZCKVLO ++ mktemp + local LAST_ERR=/tmp/tmp.n5dEUUpWeS + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com + 
exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.IPXnZCKVLO + cat /tmp/tmp.n5dEUUpWeS + rm /tmp/tmp.IPXnZCKVLO /tmp/tmp.n5dEUUpWeS + return 0 + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + kubectl get perconaservermongodbs.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbs.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbs" + kubectl patch perconaservermongodbs.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbs" + : + kubectl_bin wait --for=delete crd perconaservermongodbs.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.cH2EN48jW6 ++ mktemp + local LAST_ERR=/tmp/tmp.qmjVRPZj0R + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbs.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.cH2EN48jW6 + cat /tmp/tmp.qmjVRPZj0R + rm /tmp/tmp.cH2EN48jW6 /tmp/tmp.qmjVRPZj0R + return 0 + local rbac_yaml=rbac.yaml + '[' -n psmdb-operator ']' + rbac_yaml=cw-rbac.yaml + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2152/deploy/cw-rbac.yaml --ignore-not-found ++ mktemp + local LAST_OUT=/tmp/tmp.wTSgZRlYmA ++ mktemp + local LAST_ERR=/tmp/tmp.kASSnMrB3O + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2152/deploy/cw-rbac.yaml --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.wTSgZRlYmA clusterrole.rbac.authorization.k8s.io "percona-server-mongodb-operator" deleted clusterrolebinding.rbac.authorization.k8s.io "service-account-percona-server-mongodb-operator" deleted + cat /tmp/tmp.kASSnMrB3O + rm /tmp/tmp.wTSgZRlYmA /tmp/tmp.kASSnMrB3O + return 0 + destroy_cert_manager + kubectl_bin delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.EgvDYoEig9 ++ mktemp + local LAST_ERR=/tmp/tmp.9yiY27p426 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.EgvDYoEig9 + cat /tmp/tmp.9yiY27p426 Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": namespaces "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "challenges.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "orders.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificaterequests.cert-manager.io" not found Error from server (NotFound): error when deleting 
"https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificates.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "clusterissuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "issuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": serviceaccounts "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": serviceaccounts "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": serviceaccounts "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cluster-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-edit" not found Error from server (NotFound): error when deleting 
"https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting 
"https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": services "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": services "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": services "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": deployments.apps "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": deployments.apps "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": deployments.apps "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": mutatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": validatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found + sleep 0 + for i in $(seq 0 2) + set +e + kubectl delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.EgvDYoEig9 + cat /tmp/tmp.9yiY27p426 Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": namespaces "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "challenges.acme.cert-manager.io" not found Error from 
server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "orders.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificaterequests.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificates.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "clusterissuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "issuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": serviceaccounts "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": serviceaccounts "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": serviceaccounts "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cluster-view" not found Error from server (NotFound): error 
when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-edit" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting 
"https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": services "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": services "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": services "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": deployments.apps "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": deployments.apps "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": deployments.apps "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": mutatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": validatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found + sleep 4 + for i in $(seq 0 2) + set +e + kubectl delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.EgvDYoEig9 + cat 
/tmp/tmp.9yiY27p426 Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": namespaces "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "challenges.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "orders.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificaterequests.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificates.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "clusterissuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "issuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": serviceaccounts "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": serviceaccounts "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": serviceaccounts "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when 
deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cluster-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-edit" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting 
"https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": services "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": services "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": services "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": deployments.apps "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": deployments.apps "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": deployments.apps "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": mutatingwebhookconfigurations.admissionregistration.k8s.io 
"cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": validatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found + sleep 8 + cat /tmp/tmp.EgvDYoEig9 + cat /tmp/tmp.9yiY27p426 Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": namespaces "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "challenges.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "orders.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificaterequests.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificates.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "clusterissuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "issuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": serviceaccounts "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": serviceaccounts "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": serviceaccounts "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io 
"cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cluster-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-edit" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting 
"https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": services "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": services "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": services "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": deployments.apps "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": deployments.apps "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": deployments.apps 
"cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": mutatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": validatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found + rm /tmp/tmp.EgvDYoEig9 /tmp/tmp.9yiY27p426 + return 1 + true + '[' -n '' ']' + kubectl_bin delete --grace-period=0 --force=true namespace pitr-physical-backup-source-16105 + '[' -n psmdb-operator ']' ++ mktemp + rm -rf /tmp/tmp.iB4fDcwwkH + kubectl_bin delete --grace-period=0 --force=true namespace psmdb-operator + local LAST_OUT=/tmp/tmp.6pMeCv5o4h ++ mktemp ++ mktemp + local LAST_OUT=/tmp/tmp.rJJEQgSI74 + local LAST_ERR=/tmp/tmp.6cdeKzhsVx ++ mktemp + local exit_status=0 + local timeout=4 ++ seq 0 2 + desc 'test passed' + for i in $(seq 0 2) + set +e + kubectl delete --grace-period=0 --force=true namespace pitr-physical-backup-source-16105 + set +o xtrace ----------------------------------------------------------------------------------- test passed ----------------------------------------------------------------------------------- + local LAST_ERR=/tmp/tmp.FD4SuJ0sLd + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete --grace-period=0 --force=true namespace psmdb-operator