Log: /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2256/e2e-tests/logs/pitr-physical-backup-source.log Warning: version difference between client (1.35) and server (1.32) exceeds the supported minor version skew of +/-1 Warning: version difference between client (1.35) and server (1.32) exceeds the supported minor version skew of +/-1 Warning: version difference between client (1.35) and server (1.32) exceeds the supported minor version skew of +/-1 + main + create_infra pitr-physical-backup-source-9554 + local ns=pitr-physical-backup-source-9554 + [[ 1 == 1 ]] + delete_crd + desc 'get and delete old CRDs and RBAC' + set +o xtrace ----------------------------------------------------------------------------------- get and delete old CRDs and RBAC ----------------------------------------------------------------------------------- + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2256/deploy/crd.yaml --ignore-not-found --wait=false ++ mktemp + local LAST_OUT=/tmp/tmp.3ISjjwyDJW ++ mktemp + local LAST_ERR=/tmp/tmp.4CzOQep3H9 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2256/deploy/crd.yaml --ignore-not-found --wait=false + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.3ISjjwyDJW customresourcedefinition.apiextensions.k8s.io "perconaservermongodbbackups.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbrestores.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbs.psmdb.percona.com" deleted + cat /tmp/tmp.4CzOQep3H9 + rm /tmp/tmp.3ISjjwyDJW /tmp/tmp.4CzOQep3H9 + return 0 ++ grep -v '\-\-\-' ++ yq eval .metadata.name /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2256/deploy/crd.yaml grep: warning: stray \ before - grep: warning: stray \ before - + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + grep -v NAMESPACE + kubectl get perconaservermongodbbackups.psmdb.percona.com --all-namespaces -o wide + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbbackups.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' No resources found + kubectl patch perconaservermongodbbackups.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbbackups" + : + kubectl_bin wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.qYs4RbH1ph ++ mktemp + local LAST_ERR=/tmp/tmp.dg3bWRP9pz + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.qYs4RbH1ph + cat /tmp/tmp.dg3bWRP9pz + rm /tmp/tmp.qYs4RbH1ph /tmp/tmp.dg3bWRP9pz + return 0 + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + kubectl get perconaservermongodbrestores.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbrestores.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbrestores" + kubectl patch perconaservermongodbrestores.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource 
type "perconaservermongodbrestores" + : + kubectl_bin wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.upP7dPDsEq ++ mktemp + local LAST_ERR=/tmp/tmp.6nkCq9xFhu + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.upP7dPDsEq + cat /tmp/tmp.6nkCq9xFhu + rm /tmp/tmp.upP7dPDsEq /tmp/tmp.6nkCq9xFhu + return 0 + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + kubectl get perconaservermongodbs.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbs.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' No resources found + kubectl patch perconaservermongodbs.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbs" + : + kubectl_bin wait --for=delete crd perconaservermongodbs.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.EVwCc7NJB7 ++ mktemp + local LAST_ERR=/tmp/tmp.TOAPzX5VGQ + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbs.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.EVwCc7NJB7 + cat /tmp/tmp.TOAPzX5VGQ + rm /tmp/tmp.EVwCc7NJB7 /tmp/tmp.TOAPzX5VGQ + return 0 + local rbac_yaml=rbac.yaml + '[' -n psmdb-operator ']' + rbac_yaml=cw-rbac.yaml + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2256/deploy/cw-rbac.yaml --ignore-not-found ++ mktemp + local LAST_OUT=/tmp/tmp.4V7Ju4cXI9 ++ mktemp + local LAST_ERR=/tmp/tmp.dyRsc0Y3Gq + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2256/deploy/cw-rbac.yaml --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.4V7Ju4cXI9 clusterrole.rbac.authorization.k8s.io "percona-server-mongodb-operator" deleted clusterrolebinding.rbac.authorization.k8s.io "service-account-percona-server-mongodb-operator" deleted + cat /tmp/tmp.dyRsc0Y3Gq + rm /tmp/tmp.4V7Ju4cXI9 /tmp/tmp.dyRsc0Y3Gq + return 0 + check_crd_for_deletion PR-2256-6d23a024 + local git_tag=PR-2256-6d23a024 ++ curl -s https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/PR-2256-6d23a024/deploy/crd.yaml ++ yq eval .metadata.name ++ /usr/sbin/sed s/---//g ++ /usr/sbin/sed ':a;N;$!ba;s/\n/ /g' + for crd_name in $(curl -s https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/${git_tag}/deploy/crd.yaml | yq eval '.metadata.name' | $sed 's/---//g' | $sed ':a;N;$!ba;s/\n/ /g') ++ kubectl_bin get crd/null -o 'jsonpath={.status.conditions[-1].type}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.KjdNA1B1XP +++ mktemp ++ local LAST_ERR=/tmp/tmp.k6BhKydAJx ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}' ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.KjdNA1B1XP ++ cat /tmp/tmp.k6BhKydAJx Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ sleep 0 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}' ++ exit_status=1 ++ 
set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.KjdNA1B1XP ++ cat /tmp/tmp.k6BhKydAJx Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ sleep 4 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}' ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.KjdNA1B1XP ++ cat /tmp/tmp.k6BhKydAJx Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ sleep 8 ++ cat /tmp/tmp.KjdNA1B1XP ++ cat /tmp/tmp.k6BhKydAJx Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ rm /tmp/tmp.KjdNA1B1XP /tmp/tmp.k6BhKydAJx ++ return 1 + [[ '' == Terminating ]] + '[' -n psmdb-operator ']' + create_namespace psmdb-operator + local namespace=psmdb-operator + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + destroy_chaos_mesh ++ helm list --all-namespaces --filter chaos-mesh ++ tail -n1 ++ awk '-F ' '{print $2}' ++ sed s/NAMESPACE// + local chaos_mesh_ns= + desc 'destroy chaos-mesh' + set +o xtrace ----------------------------------------------------------------------------------- destroy chaos-mesh ----------------------------------------------------------------------------------- + '[' -n '' ']' ++ kubectl get MutatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete MutatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep validate-auth ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl api-resources ++ grep chaos-mesh ++ awk '{print $1}' ++ kubectl get crd ++ grep chaos-mesh.org ++ awk '{print $1}' + timeout 30 kubectl delete crd error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrolebinding ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrolebinding error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrole ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrole error: resource(s) were provided, but no name was specified + : + desc 'cleaned up all old namespaces' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up all old namespaces ----------------------------------------------------------------------------------- + kubectl_bin get ns + grep -E -v '^kube-|^default|Terminating|psmdb-operator|openshift|^gke-|^gmp-|^NAME' + awk '{print$1}' + '[' -n '' ']' + desc 'cleaned up old namespaces psmdb-operator' ++ mktemp + xargs kubectl delete ns + set +o xtrace ----------------------------------------------------------------------------------- cleaned up old namespaces psmdb-operator ----------------------------------------------------------------------------------- + kubectl_bin delete namespace psmdb-operator --ignore-not-found ++ mktemp + local LAST_OUT=/tmp/tmp.qVhbfQoXcM ++ mktemp + local LAST_OUT=/tmp/tmp.SyFXJedBN9 ++ mktemp + local LAST_ERR=/tmp/tmp.VBJvc5Yctd + local exit_status=0 + local timeout=4 ++ seq 0 2 + local LAST_ERR=/tmp/tmp.3KoErqheU2 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in 
$(seq 0 2) + set +e + kubectl get ns + for i in $(seq 0 2) + set +e + kubectl delete namespace psmdb-operator --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.qVhbfQoXcM + cat /tmp/tmp.VBJvc5Yctd + rm /tmp/tmp.qVhbfQoXcM /tmp/tmp.VBJvc5Yctd + return 0 namespace "pitr-physical-backup-source-23818" deleted + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.SyFXJedBN9 namespace "psmdb-operator" deleted + cat /tmp/tmp.3KoErqheU2 + rm /tmp/tmp.SyFXJedBN9 /tmp/tmp.3KoErqheU2 + return 0 + kubectl_bin wait --for=delete namespace psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.910klZ6UTE ++ mktemp + local LAST_ERR=/tmp/tmp.Rs6hJKjdGh + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete namespace psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.910klZ6UTE + cat /tmp/tmp.Rs6hJKjdGh + rm /tmp/tmp.910klZ6UTE /tmp/tmp.Rs6hJKjdGh + return 0 + desc 'create namespace psmdb-operator' + set +o xtrace ----------------------------------------------------------------------------------- create namespace psmdb-operator ----------------------------------------------------------------------------------- + kubectl_bin create namespace psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.ZBejWQ8bIt ++ mktemp + local LAST_ERR=/tmp/tmp.tOjHlzpyUE + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl create namespace psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.ZBejWQ8bIt namespace/psmdb-operator created + cat /tmp/tmp.tOjHlzpyUE + rm /tmp/tmp.ZBejWQ8bIt /tmp/tmp.tOjHlzpyUE + return 0 + set_kube_ctx psmdb-operator + local namespace=psmdb-operator ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.72opJYBvJg +++ mktemp ++ local LAST_ERR=/tmp/tmp.qOCtOOiKBG ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.72opJYBvJg ++ cat /tmp/tmp.qOCtOOiKBG ++ rm /tmp/tmp.72opJYBvJg /tmp/tmp.qOCtOOiKBG ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-2256-6d23a024-4-cluster3 --namespace=psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.d5SxT2EJFQ ++ mktemp + local LAST_ERR=/tmp/tmp.cSDYMfL3ru + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-2256-6d23a024-4-cluster3 --namespace=psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.d5SxT2EJFQ Context "gke_cloud-dev-112233_us-central1-a_jen-psmdb-2256-6d23a024-4-cluster3" modified. 
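
[editor's note] Virtually all of the mktemp / LAST_OUT / LAST_ERR / 'seq 0 2' noise in this trace comes from a single wrapper around kubectl. Reconstructed from the traced behavior only (the authoritative definition lives in e2e-tests/functions; the break condition is inferred from the "'[' 0 '!=' 0 -a -n 1 ']'" tests and the observed sleep 0 / sleep 4 / sleep 8 backoff):

# kubectl with up to three attempts and escalating backoff; stdout and
# stderr are buffered to temp files and replayed after each attempt.
kubectl_bin() {
    local LAST_OUT LAST_ERR exit_status=0 timeout=4
    LAST_OUT=$(mktemp)
    LAST_ERR=$(mktemp)
    for i in $(seq 0 2); do
        set +e
        kubectl "$@" 1>"$LAST_OUT" 2>"$LAST_ERR"
        exit_status=$?
        set -e
        if [ "$exit_status" != 0 ]; then
            cat "$LAST_OUT"
            cat "$LAST_ERR" >&2
            sleep $((timeout * i))   # observed backoff: 0s, 4s, 8s
        else
            break
        fi
    done
    cat "$LAST_OUT"
    cat "$LAST_ERR" >&2
    rm "$LAST_OUT" "$LAST_ERR"
    return "$exit_status"
}
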
+ cat /tmp/tmp.cSDYMfL3ru + rm /tmp/tmp.d5SxT2EJFQ /tmp/tmp.cSDYMfL3ru + return 0 + deploy_operator + desc 'start PSMDB operator: docker.io/perconalab/percona-server-mongodb-operator:PR-2256-6d23a024' + set +o xtrace ----------------------------------------------------------------------------------- start PSMDB operator: docker.io/perconalab/percona-server-mongodb-operator:PR-2256-6d23a024 ----------------------------------------------------------------------------------- + local cr_file + '[' -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2256/e2e-tests/pitr-physical-backup-source/conf/crd.yaml ']' + cr_file=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2256/deploy/crd.yaml + kubectl_bin apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2256/deploy/crd.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.kZlRwKYaG8 ++ mktemp + local LAST_ERR=/tmp/tmp.jmJJc5DBrR + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2256/deploy/crd.yaml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.kZlRwKYaG8 customresourcedefinition.apiextensions.k8s.io/perconaservermongodbbackups.psmdb.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaservermongodbrestores.psmdb.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaservermongodbs.psmdb.percona.com serverside-applied + cat /tmp/tmp.jmJJc5DBrR + rm /tmp/tmp.kZlRwKYaG8 /tmp/tmp.jmJJc5DBrR + return 0 + '[' -n psmdb-operator ']' + apply_rbac cw-rbac + local operator_namespace=psmdb-operator + local rbac=cw-rbac + sed -e 's^namespace: .*^namespace: psmdb-operator^' + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2256/deploy/cw-rbac.yaml + kubectl_bin apply -n psmdb-operator -f - ++ mktemp + local LAST_OUT=/tmp/tmp.C1bzVNvvQb ++ mktemp + local LAST_ERR=/tmp/tmp.bjwevgj0dO + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -n psmdb-operator -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.C1bzVNvvQb clusterrole.rbac.authorization.k8s.io/percona-server-mongodb-operator created serviceaccount/percona-server-mongodb-operator created clusterrolebinding.rbac.authorization.k8s.io/service-account-percona-server-mongodb-operator created + cat /tmp/tmp.bjwevgj0dO + rm /tmp/tmp.C1bzVNvvQb /tmp/tmp.bjwevgj0dO + return 0 + kubectl_bin apply -n psmdb-operator -f - + yq eval $'\n\t\t\t(.spec.template.spec.containers[].image = "docker.io/perconalab/percona-server-mongodb-operator:PR-2256-6d23a024") |\n\t\t\t((.. | select(.[] == "DISABLE_TELEMETRY")) |= .value="true") |\n\t\t\t((.. 
| select(.[] == "LOG_LEVEL")) |= .value="DEBUG")' /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2256/deploy/cw-operator.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.ktUwh7UyBI ++ mktemp + local LAST_ERR=/tmp/tmp.leIKnFurGy + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -n psmdb-operator -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.ktUwh7UyBI deployment.apps/percona-server-mongodb-operator created + cat /tmp/tmp.leIKnFurGy + rm /tmp/tmp.ktUwh7UyBI /tmp/tmp.leIKnFurGy + return 0 + sleep 20 ++ get_operator_pod ++ kubectl_bin get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.swoOJQ7nKB +++ mktemp ++ local LAST_ERR=/tmp/tmp.YJWznGYevi ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.swoOJQ7nKB ++ cat /tmp/tmp.YJWznGYevi ++ rm /tmp/tmp.swoOJQ7nKB /tmp/tmp.YJWznGYevi ++ return 0 + wait_operator_pod percona-server-mongodb-operator-69449bddd8-zwl7k + local pod=percona-server-mongodb-operator-69449bddd8-zwl7k + set +o xtrace waiting for pod/percona-server-mongodb-operator-69449bddd8-zwl7k to be ready.OK + echo 'Print operator info from log' Print operator info from log + grep 'Manager starting up' ++ get_operator_pod ++ kubectl_bin get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.ZEwtF78q1Z +++ mktemp ++ local LAST_ERR=/tmp/tmp.vTmDWfK5BA ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.ZEwtF78q1Z ++ cat /tmp/tmp.vTmDWfK5BA ++ rm /tmp/tmp.ZEwtF78q1Z /tmp/tmp.vTmDWfK5BA ++ return 0 + kubectl_bin logs -n psmdb-operator percona-server-mongodb-operator-69449bddd8-zwl7k ++ mktemp + local LAST_OUT=/tmp/tmp.0TX5v0rumH ++ mktemp + local LAST_ERR=/tmp/tmp.48LTGrO96t + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl logs -n psmdb-operator percona-server-mongodb-operator-69449bddd8-zwl7k + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.0TX5v0rumH + cat /tmp/tmp.48LTGrO96t + rm /tmp/tmp.0TX5v0rumH /tmp/tmp.48LTGrO96t + return 0 2026-04-01T08:08:53.305Z INFO setup Manager starting up {"gitCommit": "6d23a02448e5034120ea9854d0301e90217db846", "gitBranch": "PR-2256-6d23a024", "buildTime": "", "goVersion": "go1.25.8", "os": "linux", "arch": "amd64"} + create_namespace pitr-physical-backup-source-9554 + local namespace=pitr-physical-backup-source-9554 + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + destroy_chaos_mesh ++ helm list --all-namespaces --filter chaos-mesh ++ tail -n1 ++ awk '-F ' '{print $2}' ++ sed s/NAMESPACE// + local chaos_mesh_ns= + desc 'destroy chaos-mesh' + set +o xtrace ----------------------------------------------------------------------------------- destroy chaos-mesh ----------------------------------------------------------------------------------- + '[' -n '' ']' ++ kubectl get MutatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + 
timeout 30 kubectl delete MutatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep validate-auth ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl api-resources ++ grep chaos-mesh ++ awk '{print $1}' ++ kubectl get crd ++ grep chaos-mesh.org ++ awk '{print $1}' + timeout 30 kubectl delete crd error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrolebinding ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrolebinding error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrole ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrole error: resource(s) were provided, but no name was specified + : + desc 'cleaned up all old namespaces' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up all old namespaces ----------------------------------------------------------------------------------- + kubectl_bin get ns + grep -E -v '^kube-|^default|Terminating|psmdb-operator|openshift|^gke-|^gmp-|^NAME' + '[' -n '' ']' + xargs kubectl delete ns + desc 'cleaned up old namespaces pitr-physical-backup-source-9554' + awk '{print$1}' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up old namespaces pitr-physical-backup-source-9554 ----------------------------------------------------------------------------------- + kubectl_bin delete namespace pitr-physical-backup-source-9554 --ignore-not-found ++ mktemp ++ mktemp + local LAST_OUT=/tmp/tmp.IE3wZUtStj + local LAST_OUT=/tmp/tmp.FFE5d8rL6S ++ mktemp ++ mktemp + local LAST_ERR=/tmp/tmp.dkOL7OExrt + local exit_status=0 + local timeout=4 ++ seq 0 2 + local LAST_ERR=/tmp/tmp.W45qykLnzx + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get ns + for i in $(seq 0 2) + set +e + kubectl delete namespace pitr-physical-backup-source-9554 --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.IE3wZUtStj + cat /tmp/tmp.dkOL7OExrt + rm /tmp/tmp.IE3wZUtStj /tmp/tmp.dkOL7OExrt + return 0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.FFE5d8rL6S + cat /tmp/tmp.W45qykLnzx + rm /tmp/tmp.FFE5d8rL6S /tmp/tmp.W45qykLnzx + return 0 + kubectl_bin wait --for=delete namespace pitr-physical-backup-source-9554 ++ mktemp + local LAST_OUT=/tmp/tmp.faZeqaxPM7 error: resource(s) were provided, but no name was specified ++ mktemp + local LAST_ERR=/tmp/tmp.cGRUFEree7 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete namespace pitr-physical-backup-source-9554 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.faZeqaxPM7 + cat /tmp/tmp.cGRUFEree7 + rm /tmp/tmp.faZeqaxPM7 /tmp/tmp.cGRUFEree7 + return 0 + desc 'create namespace pitr-physical-backup-source-9554' + set +o xtrace ----------------------------------------------------------------------------------- create namespace pitr-physical-backup-source-9554 ----------------------------------------------------------------------------------- 
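
[editor's note] destroy_chaos_mesh (run once per namespace setup, hence its second appearance here) applies the same list-filter-delete shape to every chaos-mesh resource kind; with nothing installed, each bare 'kubectl delete <kind>' fails with the tolerated "no name was specified" error. A sketch of that shape (the real helper greps chaos-mesh.org for CRDs and also handles a helm release and the validate-auth webhook; xargs -r makes the empty case a no-op instead of an error):

# Remove chaos-mesh leftovers kind by kind under a 30s timeout.
for kind in MutatingWebhookConfiguration ValidatingWebhookConfiguration \
    crd clusterrolebinding clusterrole; do
    kubectl get "$kind" 2>/dev/null | grep chaos-mesh | awk '{print $1}' \
        | xargs -r timeout 30 kubectl delete "$kind" || :
done
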
+ kubectl_bin create namespace pitr-physical-backup-source-9554 ++ mktemp + local LAST_OUT=/tmp/tmp.TZ495tfTqS ++ mktemp + local LAST_ERR=/tmp/tmp.j58cwzc3VP + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl create namespace pitr-physical-backup-source-9554 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.TZ495tfTqS namespace/pitr-physical-backup-source-9554 created + cat /tmp/tmp.j58cwzc3VP + rm /tmp/tmp.TZ495tfTqS /tmp/tmp.j58cwzc3VP + return 0 + set_kube_ctx pitr-physical-backup-source-9554 + local namespace=pitr-physical-backup-source-9554 ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.xeWIXtYoq6 +++ mktemp ++ local LAST_ERR=/tmp/tmp.0vbzKW0QqZ ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.xeWIXtYoq6 ++ cat /tmp/tmp.0vbzKW0QqZ ++ rm /tmp/tmp.xeWIXtYoq6 /tmp/tmp.0vbzKW0QqZ ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-2256-6d23a024-4-cluster3 --namespace=pitr-physical-backup-source-9554 ++ mktemp + local LAST_OUT=/tmp/tmp.MyCIJde56M ++ mktemp + local LAST_ERR=/tmp/tmp.D897KK29u4 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-2256-6d23a024-4-cluster3 --namespace=pitr-physical-backup-source-9554 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.MyCIJde56M Context "gke_cloud-dev-112233_us-central1-a_jen-psmdb-2256-6d23a024-4-cluster3" modified. + cat /tmp/tmp.D897KK29u4 + rm /tmp/tmp.MyCIJde56M /tmp/tmp.D897KK29u4 + return 0 + deploy_minio + local cert_secret= + local service_name=minio-service + desc 'install MinIO: minio-service' + set +o xtrace ----------------------------------------------------------------------------------- install MinIO: minio-service ----------------------------------------------------------------------------------- + helm uninstall minio-service + : + helm repo remove minio "minio" has been removed from your repositories + helm repo add minio https://charts.min.io/ "minio" has been added to your repositories + local endpoint=http://minio-service:9000 + minio_args=('--version' '5.4.0' '--set' 'replicas=1' '--set' 'mode=standalone' '--set' 'resources.requests.memory=256Mi' '--set' 'rootUser=rootuser' '--set' 'rootPassword=rootpass123' '--set' 'users[0].accessKey=some-access-key' '--set' 'users[0].secretKey=some-secret-key' '--set' 'users[0].policy=consoleAdmin' '--set' 'service.type=ClusterIP' '--set' 'configPathmc=/tmp/' '--set' 'securityContext.enabled=false' '--set' 'persistence.size=2G' '--set' 'fullnameOverride=minio-service' '--set' 'serviceAccount.create=true' '--set' 'serviceAccount.name=minio-service-sa') + local minio_args + [[ -n '' ]] + retry 10 60 helm install minio-service --version 5.4.0 --set replicas=1 --set mode=standalone --set resources.requests.memory=256Mi --set rootUser=rootuser --set rootPassword=rootpass123 --set 'users[0].accessKey=some-access-key' --set 'users[0].secretKey=some-secret-key' --set 'users[0].policy=consoleAdmin' --set service.type=ClusterIP --set configPathmc=/tmp/ --set securityContext.enabled=false --set persistence.size=2G --set fullnameOverride=minio-service --set serviceAccount.create=true --set serviceAccount.name=minio-service-sa minio/minio + local max=10 + local delay=60 + shift 2 + local n=1 + 
helm install minio-service --version 5.4.0 --set replicas=1 --set mode=standalone --set resources.requests.memory=256Mi --set rootUser=rootuser --set rootPassword=rootpass123 --set 'users[0].accessKey=some-access-key' --set 'users[0].secretKey=some-secret-key' --set 'users[0].policy=consoleAdmin' --set service.type=ClusterIP --set configPathmc=/tmp/ --set securityContext.enabled=false --set persistence.size=2G --set fullnameOverride=minio-service --set serviceAccount.create=true --set serviceAccount.name=minio-service-sa minio/minio NAME: minio-service LAST DEPLOYED: Wed Apr 1 08:09:29 2026 NAMESPACE: pitr-physical-backup-source-9554 STATUS: deployed REVISION: 1 TEST SUITE: None NOTES: MinIO can be accessed via port 9000 on the following DNS name from within your cluster: minio-service.pitr-physical-backup-source-9554.cluster.local To access MinIO from localhost, run the below commands: 1. export POD_NAME=$(kubectl get pods --namespace pitr-physical-backup-source-9554 -l "release=minio-service" -o jsonpath="{.items[0].metadata.name}") 2. kubectl port-forward $POD_NAME 9000 --namespace pitr-physical-backup-source-9554 Read more about port forwarding here: http://kubernetes.io/docs/user-guide/kubectl/kubectl_port-forward/ You can now access MinIO server on http://localhost:9000. Follow the below steps to connect to MinIO server with mc client: 1. Download the MinIO mc client - https://min.io/docs/minio/linux/reference/minio-mc.html#quickstart 2. export MC_HOST_minio-service-local=http://$(kubectl get secret --namespace pitr-physical-backup-source-9554 minio-service -o jsonpath="{.data.rootUser}" | base64 --decode):$(kubectl get secret --namespace pitr-physical-backup-source-9554 minio-service -o jsonpath="{.data.rootPassword}" | base64 --decode)@localhost:9000 3. 
mc ls minio-service-local ++ kubectl_bin get pods --selector=release=minio-service -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.juw4dMXeUO +++ mktemp ++ local LAST_ERR=/tmp/tmp.ZDZUe3Y6Pf ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=release=minio-service -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.juw4dMXeUO ++ cat /tmp/tmp.ZDZUe3Y6Pf ++ rm /tmp/tmp.juw4dMXeUO /tmp/tmp.ZDZUe3Y6Pf ++ return 0 + local MINIO_POD=minio-service-6d5f646cdc-xwz2d + wait_pod minio-service-6d5f646cdc-xwz2d + local pod=minio-service-6d5f646cdc-xwz2d + set +o xtrace waiting for pod/minio-service-6d5f646cdc-xwz2d to be ready.OK + '[' -n psmdb-operator ']' + kubectl_bin create svc -n psmdb-operator externalname minio-service --external-name=minio-service.pitr-physical-backup-source-9554.svc.cluster.local --tcp=9000 service/minio-service created + create_minio_bucket operator-testing http://minio-service:9000 + local bucket=operator-testing + local endpoint=http://minio-service:9000 + kubectl_bin run -i --rm aws-cli --image=docker.io/perconalab/awscli --restart=Never -- bash -c $'AWS_ACCESS_KEY_ID=some-access-key \t\tAWS_SECRET_ACCESS_KEY=some-secret-key \t\tAWS_DEFAULT_REGION=us-east-1 /usr/bin/aws --no-verify-ssl --endpoint-url http://minio-service:9000 s3 mb s3://operator-testing' ++ mktemp + local LAST_OUT=/tmp/tmp.L6P9arw1w3 ++ mktemp + local LAST_ERR=/tmp/tmp.LOb30rQZt8 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl run -i --rm aws-cli --image=docker.io/perconalab/awscli --restart=Never -- bash -c $'AWS_ACCESS_KEY_ID=some-access-key \t\tAWS_SECRET_ACCESS_KEY=some-secret-key \t\tAWS_DEFAULT_REGION=us-east-1 /usr/bin/aws --no-verify-ssl --endpoint-url http://minio-service:9000 s3 mb s3://operator-testing' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.L6P9arw1w3 pod "aws-cli" deleted from pitr-physical-backup-source-9554 namespace + cat /tmp/tmp.LOb30rQZt8 All commands and output from this session will be recorded in container logs, including credentials and sensitive information passed through the command prompt. If you don't see a command prompt, try pressing enter. 
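
[editor's note] create_minio_bucket, untangled from the $'...\t\t...' escaping above: the bucket is created from a throwaway in-cluster pod, so the S3 endpoint only ever needs to be reachable inside the cluster (the keys are the fixture credentials passed to helm install, not real secrets):

# One-shot AWS CLI pod that creates the backup bucket on in-cluster MinIO.
kubectl run -i --rm aws-cli --image=docker.io/perconalab/awscli --restart=Never -- \
    bash -c 'AWS_ACCESS_KEY_ID=some-access-key \
        AWS_SECRET_ACCESS_KEY=some-secret-key \
        AWS_DEFAULT_REGION=us-east-1 \
        /usr/bin/aws --no-verify-ssl --endpoint-url http://minio-service:9000 s3 mb s3://operator-testing'
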
+ rm /tmp/tmp.L6P9arw1w3 /tmp/tmp.LOb30rQZt8 + return 0 + desc 'create secrets and start client' + set +o xtrace ----------------------------------------------------------------------------------- create secrets and start client ----------------------------------------------------------------------------------- + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2256/e2e-tests/conf/secrets.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2256/e2e-tests/conf/client.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2256/e2e-tests/conf/minio-secret.yml ++ mktemp + local LAST_OUT=/tmp/tmp.1fF3hvpFMF ++ mktemp + local LAST_ERR=/tmp/tmp.4Qrj5PfTox + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2256/e2e-tests/conf/secrets.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2256/e2e-tests/conf/client.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2256/e2e-tests/conf/minio-secret.yml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.1fF3hvpFMF secret/some-users created deployment.apps/psmdb-client created secret/minio-secret created + cat /tmp/tmp.4Qrj5PfTox + rm /tmp/tmp.1fF3hvpFMF /tmp/tmp.4Qrj5PfTox + return 0 + cluster=some-name + desc 'create first PSMDB cluster some-name' + set +o xtrace ----------------------------------------------------------------------------------- create first PSMDB cluster some-name ----------------------------------------------------------------------------------- + apply_cluster /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2256/e2e-tests/pitr-physical-backup-source/conf/some-name-rs0.yml + '[' -z '' ']' + cat_config /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2256/e2e-tests/pitr-physical-backup-source/conf/some-name-rs0.yml + kubectl_bin apply -f - + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2256/e2e-tests/pitr-physical-backup-source/conf/some-name-rs0.yml ++ mktemp + yq eval '(.spec | select(.image == null)).image = "docker.io/perconalab/percona-server-mongodb-operator:main-mongod8.0"' + yq eval '(.spec | select(has("pmm"))).pmm.image = "docker.io/percona/pmm-client:2.44.1-1"' + yq eval '(.spec | select(has("initImage"))).initImage = "docker.io/perconalab/percona-server-mongodb-operator:PR-2256-6d23a024"' + yq eval '(.spec | select(has("backup"))).backup.image = "docker.io/perconalab/percona-server-mongodb-operator:main-backup"' + /usr/sbin/sed -e s/NAME_SPACE/pitr-physical-backup-source-9554/g + yq eval '.spec.upgradeOptions.apply="Never"' + local LAST_OUT=/tmp/tmp.FLns9XPKe6 ++ mktemp + local LAST_ERR=/tmp/tmp.SBfcyIKasZ + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.FLns9XPKe6 perconaservermongodb.psmdb.percona.com/some-name created + cat /tmp/tmp.SBfcyIKasZ + rm /tmp/tmp.FLns9XPKe6 /tmp/tmp.SBfcyIKasZ + return 0 + desc 'check if all 3 Pods started' + set +o xtrace ----------------------------------------------------------------------------------- check if all 3 Pods started ----------------------------------------------------------------------------------- + wait_for_running some-name-rs0 3 + local name=some-name-rs0 + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=some-name ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod some-name-rs0-0 + local 
pod=some-name-rs0-0 + set +o xtrace waiting for pod/some-name-rs0-0 to be ready.......OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod some-name-rs0-1 + local pod=some-name-rs0-1 + set +o xtrace waiting for pod/some-name-rs0-1 to be ready.......OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.SIcERiTztn +++ mktemp ++ local LAST_ERR=/tmp/tmp.kzndbqZrbz ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.SIcERiTztn ++ cat /tmp/tmp.kzndbqZrbz ++ rm /tmp/tmp.SIcERiTztn /tmp/tmp.kzndbqZrbz ++ return 0 + [[ '' == true ]] + wait_pod some-name-rs0-2 + local pod=some-name-rs0-2 + set +o xtrace waiting for pod/some-name-rs0-2 to be ready......OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.wFmFpmDvMA +++ mktemp ++ local LAST_ERR=/tmp/tmp.yx8sgcjQKI ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.wFmFpmDvMA ++ cat /tmp/tmp.yx8sgcjQKI ++ rm /tmp/tmp.wFmFpmDvMA /tmp/tmp.yx8sgcjQKI ++ return 0 + [[ '' == true ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.d8AUEXurH7 +++ mktemp ++ local LAST_ERR=/tmp/tmp.i6SUdTerYU ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.d8AUEXurH7 ++ cat /tmp/tmp.i6SUdTerYU ++ rm /tmp/tmp.d8AUEXurH7 /tmp/tmp.i6SUdTerYU ++ return 0 + [[ '' == true ]] + sleep 10 + [[ true == true ]] + set +x Waiting for cluster readyness....................................................... 
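
[editor's note] The bare "[[ '' == true ]]" checks above are wait_for_running probing the optional replica-set member types on the custom resource; each jsonpath comes back empty because this CR enables none of them. A standalone equivalent, assuming the cluster name and context from this run:

# Probe the optional member types of replset rs0 on the some-name CR; an
# empty result means the field is absent, i.e. the feature is disabled.
for feature in arbiter nonvoting hidden; do
    enabled=$(kubectl get psmdb some-name \
        -o "jsonpath={.spec.replsets[?(@.name==\"rs0\")].${feature}.enabled}")
    echo "rs0 ${feature}.enabled: ${enabled:-<unset>}"
done
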
+ wait_for_running some-name-cfg 3 false + local name=some-name-cfg + let last_pod=2 + local check_cluster_readyness=false + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=cfg + local cluster_name=some-name ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod some-name-cfg-0 + local pod=some-name-cfg-0 + set +o xtrace waiting for pod/some-name-cfg-0 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod some-name-cfg-1 + local pod=some-name-cfg-1 + set +o xtrace waiting for pod/some-name-cfg-1 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.dqPP2NEq9K +++ mktemp ++ local LAST_ERR=/tmp/tmp.dTo4vHDHkR ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.dqPP2NEq9K ++ cat /tmp/tmp.dTo4vHDHkR ++ rm /tmp/tmp.dqPP2NEq9K /tmp/tmp.dTo4vHDHkR ++ return 0 + [[ '' == true ]] + wait_pod some-name-cfg-2 + local pod=some-name-cfg-2 + set +o xtrace waiting for pod/some-name-cfg-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.zP0ea0bBke +++ mktemp ++ local LAST_ERR=/tmp/tmp.2pKnbs96pY ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.zP0ea0bBke ++ cat /tmp/tmp.2pKnbs96pY ++ rm /tmp/tmp.zP0ea0bBke /tmp/tmp.2pKnbs96pY ++ return 0 + [[ '' == true ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.JH1W3klisp +++ mktemp ++ local LAST_ERR=/tmp/tmp.VVyluJ5mgZ ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.JH1W3klisp ++ cat /tmp/tmp.VVyluJ5mgZ ++ rm /tmp/tmp.JH1W3klisp /tmp/tmp.VVyluJ5mgZ ++ return 0 + [[ '' == true ]] + sleep 10 + [[ false == true ]] + sleep 10 + write_initial_data + desc 'create user myApp' + set +o xtrace ----------------------------------------------------------------------------------- create user myApp ----------------------------------------------------------------------------------- + run_mongos 'db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})' userAdmin:userAdmin123456@some-name-mongos.pitr-physical-backup-source-9554 + local 'command=db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})' + local uri=userAdmin:userAdmin123456@some-name-mongos.pitr-physical-backup-source-9554 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local port=27017 + local mongo_bin=mongo ++ echo .svc.cluster.local ++ awk -F: '{print $2}' + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.xCA2Xhj7ZE +++ mktemp ++ local LAST_ERR=/tmp/tmp.Pu4vvaNAMS ++ local exit_status=0 ++ 
local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.xCA2Xhj7ZE ++ cat /tmp/tmp.Pu4vvaNAMS ++ rm /tmp/tmp.xCA2Xhj7ZE /tmp/tmp.Pu4vvaNAMS ++ return 0 + local client_container=psmdb-client-bb8b97679-4p4nr + kubectl_bin exec psmdb-client-bb8b97679-4p4nr -- bash -c 'printf '\''db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})\n'\'' | mongo mongodb://userAdmin:userAdmin123456@some-name-mongos.pitr-physical-backup-source-9554.svc.cluster.local:27017/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.tUl3d1qovC ++ mktemp + local LAST_ERR=/tmp/tmp.CrrYnFW4FX + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-bb8b97679-4p4nr -- bash -c 'printf '\''db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})\n'\'' | mongo mongodb://userAdmin:userAdmin123456@some-name-mongos.pitr-physical-backup-source-9554.svc.cluster.local:27017/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.tUl3d1qovC Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-mongos.pitr-physical-backup-source-9554.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb Implicit session: session { "id" : UUID("11287a6d-067b-4c9d-9b83-287f558b86e2") } Percona Server for MongoDB server version: v8.0.19-7 WARNING: shell and server versions do not match Successfully added user: { "user" : "myApp", "roles" : [ { "db" : "myApp", "role" : "readWrite" } ] } bye + cat /tmp/tmp.CrrYnFW4FX + rm /tmp/tmp.tUl3d1qovC /tmp/tmp.CrrYnFW4FX + return 0 + sleep 2 + write_document + local cmp_postfix= + local sleep_value=0 + log 'write initial data, read from all' + set +o xtrace [2026-04-01T08:14:03+0000] write initial data, read from all + run_mongos 'use myApp\n db.test.insert({ x: 100500 })' myApp:myPass@some-name-mongos.pitr-physical-backup-source-9554 + local 'command=use myApp\n db.test.insert({ x: 100500 })' + local uri=myApp:myPass@some-name-mongos.pitr-physical-backup-source-9554 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local port=27017 + local mongo_bin=mongo ++ echo .svc.cluster.local ++ awk -F: '{print $2}' + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Y4ukysZ0HC +++ mktemp ++ local LAST_ERR=/tmp/tmp.VnALJXHmaK ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Y4ukysZ0HC ++ cat /tmp/tmp.VnALJXHmaK ++ rm /tmp/tmp.Y4ukysZ0HC /tmp/tmp.VnALJXHmaK ++ return 0 + local client_container=psmdb-client-bb8b97679-4p4nr + kubectl_bin exec psmdb-client-bb8b97679-4p4nr -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.pitr-physical-backup-source-9554.svc.cluster.local:27017/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.zxphf0sFAl ++ mktemp + local LAST_ERR=/tmp/tmp.XIHJQebzb0 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-bb8b97679-4p4nr -- bash -c 'printf '\''use myApp\n 
db.test.insert({ x: 100500 })\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.pitr-physical-backup-source-9554.svc.cluster.local:27017/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.zxphf0sFAl Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-mongos.pitr-physical-backup-source-9554.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb Implicit session: session { "id" : UUID("c6f63ca4-fd45-485b-9306-3acccddf3a68") } Percona Server for MongoDB server version: v8.0.19-7 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.XIHJQebzb0 + rm /tmp/tmp.zxphf0sFAl /tmp/tmp.XIHJQebzb0 + return 0 + sleep 0 + compare_mongos_cmd find myApp:myPass@some-name-mongos.pitr-physical-backup-source-9554 + local command=find + local uri=myApp:myPass@some-name-mongos.pitr-physical-backup-source-9554 + local postfix= + local suffix= + local database=myApp + local collection=test + local port=27017 + local tls=false + [[ false == true ]] + mongos_command=run_mongos + log 'running db.test.command() in myApp' + set +o xtrace [2026-04-01T08:14:06+0000] running db.test.command() in myApp + run_mongos 'use myApp\n db.test.find()' myApp:myPass@some-name-mongos.pitr-physical-backup-source-9554 mongodb '' '' 27017 + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-mongos.pitr-physical-backup-source-9554 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local port=27017 + local mongo_bin=mongo + grep -E -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ echo .svc.cluster.local ++ awk -F: '{print $2}' + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.vMY81xCfum +++ mktemp ++ local LAST_ERR=/tmp/tmp.2kI0LpM5nx ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.vMY81xCfum ++ cat /tmp/tmp.2kI0LpM5nx ++ rm /tmp/tmp.vMY81xCfum /tmp/tmp.2kI0LpM5nx ++ return 0 + local client_container=psmdb-client-bb8b97679-4p4nr + kubectl_bin exec psmdb-client-bb8b97679-4p4nr -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.pitr-physical-backup-source-9554.svc.cluster.local:27017/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.qNjW95EfyB ++ mktemp + local LAST_ERR=/tmp/tmp.LakuzsGC7p + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-bb8b97679-4p4nr -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.pitr-physical-backup-source-9554.svc.cluster.local:27017/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.qNjW95EfyB + cat /tmp/tmp.LakuzsGC7p + rm /tmp/tmp.qNjW95EfyB /tmp/tmp.LakuzsGC7p + return 0 + [[ 0 -eq 0 ]] + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2256/e2e-tests/pitr-physical-backup-source/compare/find.json 
/tmp/tmp.qeMq7cb3Mw/find + wait_backup_agent some-name-rs0-0 + local agent_pod=some-name-rs0-0 + set +o xtrace waiting for pbm-agent to be ready in some-name-rs0-0...2026-04-01T08:13:12.000+0000 I listening for the commands + wait_backup_agent some-name-rs0-1 + local agent_pod=some-name-rs0-1 + set +o xtrace waiting for pbm-agent to be ready in some-name-rs0-1...2026-04-01T08:13:15.000+0000 I listening for the commands + wait_backup_agent some-name-rs0-2 + local agent_pod=some-name-rs0-2 + set +o xtrace waiting for pbm-agent to be ready in some-name-rs0-2...2026-04-01T08:13:25.000+0000 I listening for the commands + wait_backup_agent some-name-rs1-0 + local agent_pod=some-name-rs1-0 + set +o xtrace waiting for pbm-agent to be ready in some-name-rs1-0...2026-04-01T08:13:14.000+0000 I listening for the commands + wait_backup_agent some-name-rs1-1 + local agent_pod=some-name-rs1-1 + set +o xtrace waiting for pbm-agent to be ready in some-name-rs1-1...2026-04-01T08:13:16.000+0000 I listening for the commands + wait_backup_agent some-name-rs1-2 + local agent_pod=some-name-rs1-2 + set +o xtrace waiting for pbm-agent to be ready in some-name-rs1-2...2026-04-01T08:13:26.000+0000 I listening for the commands + wait_backup_agent some-name-rs2-0 + local agent_pod=some-name-rs2-0 + set +o xtrace waiting for pbm-agent to be ready in some-name-rs2-0...2026-04-01T08:13:15.000+0000 I listening for the commands + wait_backup_agent some-name-rs2-1 + local agent_pod=some-name-rs2-1 + set +o xtrace waiting for pbm-agent to be ready in some-name-rs2-1...2026-04-01T08:13:17.000+0000 I listening for the commands + wait_backup_agent some-name-rs2-2 + local agent_pod=some-name-rs2-2 + set +o xtrace waiting for pbm-agent to be ready in some-name-rs2-2...2026-04-01T08:13:27.000+0000 I listening for the commands + echo 'Sleeping for 360 seconds' Sleeping for 360 seconds + sleep 360 + backup_name_minio=backup-minio + desc 'restore pitr type date using backupSource' + set +o xtrace ----------------------------------------------------------------------------------- restore pitr type date using backupSource ----------------------------------------------------------------------------------- + run_backup backup-minio 1 physical + local name=backup-minio + local idx=1 + local type=physical + desc 'run backup backup-minio-1' + set +o xtrace ----------------------------------------------------------------------------------- run backup backup-minio-1 ----------------------------------------------------------------------------------- + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2256/e2e-tests/pitr-physical-backup-source/conf/backup-minio.yml + /usr/sbin/sed -e 's/name:/name: backup-minio-1/' + /usr/sbin/sed -e 's/type:/type: physical/' + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.oqSBgzRdrP ++ mktemp + local LAST_ERR=/tmp/tmp.qv3I0T56M1 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.oqSBgzRdrP perconaservermongodbbackup.psmdb.percona.com/backup-minio-1 created + cat /tmp/tmp.qv3I0T56M1 + rm /tmp/tmp.oqSBgzRdrP /tmp/tmp.qv3I0T56M1 + return 0 + wait_backup backup-minio-1 + local backup_name=backup-minio-1 + local target_state=ready + set +o xtrace waiting for backup-minio-1 to reach ready state.......OK + compare_latest_restorable_time some-name-rs0 backup-minio-1 + local cluster=some-name-rs0 + local backup_name=backup-minio-1 + local latest_restorable_time + local backup_time 
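
[editor's note] compare_latest_restorable_time, entered at the end of this block, drives the long retry loop that follows: it polls PBM for the newest PITR chunk until the value is non-null and identical across two consecutive reads (oplog chunks are still being uploaded, hence the early nulls), converts the unix timestamp to RFC 3339, and requires an exact match with the latestRestorableTime the operator stored on the psmdb-backup object. Distilled under those observations (get_pitr_end is an illustrative name; the 30-retry cap from the trace is omitted):

# Latest PITR chunk end according to PBM, as a unix timestamp (or null).
get_pitr_end() {
    kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json \
        | jq '.backups.pitrChunks.pitrChunks | last | .range.end'
}

first=$(get_pitr_end)
second=
# Keep polling until two consecutive non-null reads agree.
until [ "$first" != null ] && [ "$first" = "$second" ]; do
    sleep 5
    second=$first
    first=$(get_pitr_end)
done

latest_restorable_time=$(date -u -d "@${first}" +%Y-%m-%dT%H:%M:%SZ)
backup_time=$(kubectl get psmdb-backup backup-minio-1 \
    -o 'jsonpath={.status.latestRestorableTime}')
[ "$backup_time" = "$latest_restorable_time" ] \
    || { echo "latest restorable time mismatch: PBM=$latest_restorable_time CR=$backup_time" >&2; exit 1; }
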
++ get_latest_restorable_time some-name-rs0 ++ local cluster=some-name-rs0 ++ local first_timestamp ++ local second_timestamp ++ local retry=0 ++ [[ '' != '' ]] +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.n2skXKryuv ++++ mktemp +++ local LAST_ERR=/tmp/tmp.JSLw8GkZfN +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.n2skXKryuv +++ cat /tmp/tmp.JSLw8GkZfN +++ rm /tmp/tmp.n2skXKryuv /tmp/tmp.JSLw8GkZfN +++ return 0 ++ first_timestamp=null ++ sleep 5 ++ [[ null != '' ]] ++ [[ null != null ]] ++ let retry+=1 ++ [[ 1 -gt 30 ]] ++ [[ null != '' ]] ++ [[ null != null ]] +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.00qW2fsCpi ++++ mktemp +++ local LAST_ERR=/tmp/tmp.tGFPIEmNXb +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.00qW2fsCpi +++ cat /tmp/tmp.tGFPIEmNXb +++ rm /tmp/tmp.00qW2fsCpi /tmp/tmp.tGFPIEmNXb +++ return 0 ++ first_timestamp=null ++ sleep 5 ++ [[ null != '' ]] ++ [[ null != null ]] ++ let retry+=1 ++ [[ 2 -gt 30 ]] ++ [[ null != '' ]] ++ [[ null != null ]] +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.AjnS5dxSWF ++++ mktemp +++ local LAST_ERR=/tmp/tmp.eg3QDpIMle +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.AjnS5dxSWF +++ cat /tmp/tmp.eg3QDpIMle +++ rm /tmp/tmp.AjnS5dxSWF /tmp/tmp.eg3QDpIMle +++ return 0 ++ first_timestamp=null ++ sleep 5 ++ [[ null != '' ]] ++ [[ null != null ]] ++ let retry+=1 ++ [[ 3 -gt 30 ]] ++ [[ null != '' ]] ++ [[ null != null ]] +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.gUVXWHuJ18 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.H2E8xqxy3e +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.gUVXWHuJ18 +++ cat /tmp/tmp.H2E8xqxy3e +++ rm /tmp/tmp.gUVXWHuJ18 /tmp/tmp.H2E8xqxy3e +++ return 0 ++ first_timestamp=null ++ sleep 5 ++ [[ null != '' ]] ++ [[ null != null ]] ++ let retry+=1 ++ [[ 4 -gt 30 ]] ++ [[ null != '' ]] ++ [[ null != null ]] +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.zBEUgR0DbC ++++ mktemp +++ local LAST_ERR=/tmp/tmp.4e7fSacaVg +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 
-a -n 1 ']' +++ break +++ cat /tmp/tmp.zBEUgR0DbC +++ cat /tmp/tmp.4e7fSacaVg +++ rm /tmp/tmp.zBEUgR0DbC /tmp/tmp.4e7fSacaVg +++ return 0 ++ first_timestamp=1775031659 ++ sleep 5 ++ [[ 1775031659 != '' ]] ++ [[ 1775031659 != null ]] +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.KPKzrNDKRm ++++ mktemp +++ local LAST_ERR=/tmp/tmp.aBB52Tzj22 +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.KPKzrNDKRm +++ cat /tmp/tmp.aBB52Tzj22 +++ rm /tmp/tmp.KPKzrNDKRm /tmp/tmp.aBB52Tzj22 +++ return 0 ++ second_timestamp=1775031653 ++ let retry+=1 ++ [[ 5 -gt 30 ]] ++ [[ 1775031659 != '' ]] ++ [[ 1775031659 != null ]] ++ [[ 1775031659 == 1775031653 ]] +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.UbXI9dWfRG ++++ mktemp +++ local LAST_ERR=/tmp/tmp.c8NZSJIi8x +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.UbXI9dWfRG +++ cat /tmp/tmp.c8NZSJIi8x +++ rm /tmp/tmp.UbXI9dWfRG /tmp/tmp.c8NZSJIi8x +++ return 0 ++ first_timestamp=1775031653 ++ sleep 5 ++ [[ 1775031653 != '' ]] ++ [[ 1775031653 != null ]] +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.YkF1lr7nO0 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.O3V2mHCJZu +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.YkF1lr7nO0 +++ cat /tmp/tmp.O3V2mHCJZu +++ rm /tmp/tmp.YkF1lr7nO0 /tmp/tmp.O3V2mHCJZu +++ return 0 ++ second_timestamp=1775031653 ++ let retry+=1 ++ [[ 6 -gt 30 ]] ++ [[ 1775031653 != '' ]] ++ [[ 1775031653 != null ]] ++ [[ 1775031653 == 1775031653 ]] ++ /usr/sbin/date -u -d @1775031653 +%Y-%m-%dT%H:%M:%SZ + latest_restorable_time=2026-04-01T08:20:53Z ++ get_latest_restorable_time_from_backup_object backup-minio-1 ++ local backup_name=backup-minio-1 ++ local latestRestorableTime ++ local retry=0 ++ [[ '' != '' ]] ++ sleep 5 +++ kubectl_bin get psmdb-backup backup-minio-1 -o 'jsonpath={.status.latestRestorableTime}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.nxWGdJAGSo ++++ mktemp +++ local LAST_ERR=/tmp/tmp.ZplW0Ts2pX +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get psmdb-backup backup-minio-1 -o 'jsonpath={.status.latestRestorableTime}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.nxWGdJAGSo +++ cat /tmp/tmp.ZplW0Ts2pX +++ rm /tmp/tmp.nxWGdJAGSo /tmp/tmp.ZplW0Ts2pX +++ return 0 ++ latestRestorableTime=2026-04-01T08:20:53Z ++ let retry+=1 ++ [[ 1 -gt 30 ]] ++ [[ 2026-04-01T08:20:53Z != '' ]] ++ [[ 2026-04-01T08:20:53Z != null ]] ++ echo 2026-04-01T08:20:53Z + backup_time=2026-04-01T08:20:53Z + [[ 2026-04-01T08:20:53Z != 2026\-04\-01T08\:20\:53Z ]] + reset_collection + desc 'reset data' + set +o xtrace 
----------------------------------------------------------------------------------- reset data ----------------------------------------------------------------------------------- + run_mongos 'use myApp\n db.test.remove({})' myApp:myPass@some-name-mongos.pitr-physical-backup-source-9554 + local 'command=use myApp\n db.test.remove({})' + local uri=myApp:myPass@some-name-mongos.pitr-physical-backup-source-9554 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local port=27017 + local mongo_bin=mongo ++ awk -F: '{print $2}' ++ echo .svc.cluster.local + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.mf2ty60b2J +++ mktemp ++ local LAST_ERR=/tmp/tmp.kpBmlN0cRW ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.mf2ty60b2J ++ cat /tmp/tmp.kpBmlN0cRW ++ rm /tmp/tmp.mf2ty60b2J /tmp/tmp.kpBmlN0cRW ++ return 0 + local client_container=psmdb-client-bb8b97679-4p4nr + kubectl_bin exec psmdb-client-bb8b97679-4p4nr -- bash -c 'printf '\''use myApp\n db.test.remove({})\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.pitr-physical-backup-source-9554.svc.cluster.local:27017/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.0HRlqxA1qn ++ mktemp + local LAST_ERR=/tmp/tmp.0TOLbFgl26 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-bb8b97679-4p4nr -- bash -c 'printf '\''use myApp\n db.test.remove({})\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.pitr-physical-backup-source-9554.svc.cluster.local:27017/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.0HRlqxA1qn Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-mongos.pitr-physical-backup-source-9554.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb Implicit session: session { "id" : UUID("6abf903b-65cf-4800-bf99-a58534285d7f") } Percona Server for MongoDB server version: v8.0.19-7 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nRemoved" : 1 }) bye + cat /tmp/tmp.0TOLbFgl26 + rm /tmp/tmp.0HRlqxA1qn /tmp/tmp.0TOLbFgl26 + return 0 + sleep 2 + write_document '' 120 + local cmp_postfix= + local sleep_value=120 + log 'write initial data, read from all' + set +o xtrace [2026-04-01T08:21:27+0000] write initial data, read from all + run_mongos 'use myApp\n db.test.insert({ x: 100500 })' myApp:myPass@some-name-mongos.pitr-physical-backup-source-9554 + local 'command=use myApp\n db.test.insert({ x: 100500 })' + local uri=myApp:myPass@some-name-mongos.pitr-physical-backup-source-9554 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local port=27017 + local mongo_bin=mongo ++ echo .svc.cluster.local ++ awk -F: '{print $2}' + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.jfJBoVqpcT +++ mktemp ++ local LAST_ERR=/tmp/tmp.d9MfdmfNH3 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 
'!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.jfJBoVqpcT ++ cat /tmp/tmp.d9MfdmfNH3 ++ rm /tmp/tmp.jfJBoVqpcT /tmp/tmp.d9MfdmfNH3 ++ return 0 + local client_container=psmdb-client-bb8b97679-4p4nr + kubectl_bin exec psmdb-client-bb8b97679-4p4nr -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.pitr-physical-backup-source-9554.svc.cluster.local:27017/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.3hKaUh9Yl7 ++ mktemp + local LAST_ERR=/tmp/tmp.j8yZy4XOgf + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-bb8b97679-4p4nr -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.pitr-physical-backup-source-9554.svc.cluster.local:27017/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.3hKaUh9Yl7 Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-mongos.pitr-physical-backup-source-9554.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb Implicit session: session { "id" : UUID("75820b8e-d9bd-443f-94d2-9602b76eec89") } Percona Server for MongoDB server version: v8.0.19-7 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.j8yZy4XOgf + rm /tmp/tmp.3hKaUh9Yl7 /tmp/tmp.j8yZy4XOgf + return 0 + sleep 120 + compare_mongos_cmd find myApp:myPass@some-name-mongos.pitr-physical-backup-source-9554 + local command=find + local uri=myApp:myPass@some-name-mongos.pitr-physical-backup-source-9554 + local postfix= + local suffix= + local database=myApp + local collection=test + local port=27017 + local tls=false + [[ false == true ]] + mongos_command=run_mongos + log 'running db.test.command() in myApp' + set +o xtrace [2026-04-01T08:23:29+0000] running db.test.command() in myApp + run_mongos 'use myApp\n db.test.find()' myApp:myPass@some-name-mongos.pitr-physical-backup-source-9554 mongodb '' '' 27017 + grep -E -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-mongos.pitr-physical-backup-source-9554 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local port=27017 + local mongo_bin=mongo + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ echo .svc.cluster.local ++ awk -F: '{print $2}' + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.02apTLKkVi +++ mktemp ++ local LAST_ERR=/tmp/tmp.BrDnBUIrpM ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.02apTLKkVi ++ cat /tmp/tmp.BrDnBUIrpM ++ rm /tmp/tmp.02apTLKkVi /tmp/tmp.BrDnBUIrpM ++ return 0 + local client_container=psmdb-client-bb8b97679-4p4nr + kubectl_bin exec psmdb-client-bb8b97679-4p4nr -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.pitr-physical-backup-source-9554.svc.cluster.local:27017/admin ' ++ mktemp + 
local LAST_OUT=/tmp/tmp.w8qqhsTw0P ++ mktemp + local LAST_ERR=/tmp/tmp.1saQCUS3ko + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-bb8b97679-4p4nr -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.pitr-physical-backup-source-9554.svc.cluster.local:27017/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.w8qqhsTw0P + cat /tmp/tmp.1saQCUS3ko + rm /tmp/tmp.w8qqhsTw0P /tmp/tmp.1saQCUS3ko + return 0 + [[ 0 -eq 0 ]] + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2256/e2e-tests/pitr-physical-backup-source/compare/find.json /tmp/tmp.qeMq7cb3Mw/find ++ run_mongos 'new Date().getTime() / 1000' myApp:myPass@some-name-mongos.pitr-physical-backup-source-9554 mongodb '' --quiet ++ local 'command=new Date().getTime() / 1000' ++ grep -E -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match' ++ local uri=myApp:myPass@some-name-mongos.pitr-physical-backup-source-9554 ++ local driver=mongodb ++ local suffix=.svc.cluster.local ++ local mongo_flag=--quiet ++ local port=27017 ++ local mongo_bin=mongo ++ cut -d. -f1 +++ echo .svc.cluster.local +++ awk -F: '{print $2}' ++ suffix_port= ++ [[ -z '' ]] ++ suffix=.svc.cluster.local:27017 +++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.qhrsUocIJ1 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.QSDgKbdTRF +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.qhrsUocIJ1 +++ cat /tmp/tmp.QSDgKbdTRF +++ rm /tmp/tmp.qhrsUocIJ1 /tmp/tmp.QSDgKbdTRF +++ return 0 ++ local client_container=psmdb-client-bb8b97679-4p4nr ++ kubectl_bin exec psmdb-client-bb8b97679-4p4nr -- bash -c 'printf '\''new Date().getTime() / 1000\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.pitr-physical-backup-source-9554.svc.cluster.local:27017/admin --quiet' +++ mktemp ++ local LAST_OUT=/tmp/tmp.sMo8dmHDUo +++ mktemp ++ local LAST_ERR=/tmp/tmp.wBmrlKqDDf ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl exec psmdb-client-bb8b97679-4p4nr -- bash -c 'printf '\''new Date().getTime() / 1000\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.pitr-physical-backup-source-9554.svc.cluster.local:27017/admin --quiet' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.sMo8dmHDUo ++ cat /tmp/tmp.wBmrlKqDDf ++ rm /tmp/tmp.sMo8dmHDUo /tmp/tmp.wBmrlKqDDf ++ return 0 + time_now=1775031814 + check_recovery backup-minio-1 date 1775031814 '' some-name backupSource + local backup_name=backup-minio-1 + local restore_type=date + local restore_date=1775031814 + local cmp_postfix= + local cluster_name=some-name + local backupSource=backupSource ++ get_latest_oplog_chunk_ts some-name ++ local cluster=some-name +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.OT1jwU7igK ++++ mktemp +++ local LAST_ERR=/tmp/tmp.p31E1SGmDD +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o 
json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.OT1jwU7igK +++ cat /tmp/tmp.p31E1SGmDD +++ rm /tmp/tmp.OT1jwU7igK /tmp/tmp.p31E1SGmDD +++ return 0 ++ echo 1775031767 + local latest_ts=1775031767 + desc 'write more data before restore by date' + set +o xtrace ----------------------------------------------------------------------------------- write more data before restore by date ----------------------------------------------------------------------------------- + run_mongos 'use myApp\n db.test.insert({ x: 100501 })' myApp:myPass@some-name-mongos.pitr-physical-backup-source-9554 + local 'command=use myApp\n db.test.insert({ x: 100501 })' + local uri=myApp:myPass@some-name-mongos.pitr-physical-backup-source-9554 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local port=27017 + local mongo_bin=mongo ++ echo .svc.cluster.local ++ awk -F: '{print $2}' + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Q0iMdy9xi1 +++ mktemp ++ local LAST_ERR=/tmp/tmp.MPxYItUEXb ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Q0iMdy9xi1 ++ cat /tmp/tmp.MPxYItUEXb ++ rm /tmp/tmp.Q0iMdy9xi1 /tmp/tmp.MPxYItUEXb ++ return 0 + local client_container=psmdb-client-bb8b97679-4p4nr + kubectl_bin exec psmdb-client-bb8b97679-4p4nr -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100501 })\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.pitr-physical-backup-source-9554.svc.cluster.local:27017/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.U678J8Bhyp ++ mktemp + local LAST_ERR=/tmp/tmp.SGF0A7Yl8k + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-bb8b97679-4p4nr -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100501 })\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.pitr-physical-backup-source-9554.svc.cluster.local:27017/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.U678J8Bhyp Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-mongos.pitr-physical-backup-source-9554.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb Implicit session: session { "id" : UUID("10b7ae66-f913-4521-bd61-0075b2722f1a") } Percona Server for MongoDB server version: v8.0.19-7 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.SGF0A7Yl8k + rm /tmp/tmp.U678J8Bhyp /tmp/tmp.SGF0A7Yl8k + return 0 + [[ -n 1775031814 ]] ++ format_date 1775031814 ++ local timestamp=1775031814 +++ TZ=UTC +++ /usr/sbin/date -d@1775031814 '+%Y-%m-%d %H:%M:%S' ++ echo 2026-04-01 08:23:34 + desc 'Restoring to time 2026-04-01 08:23:34' + set +o xtrace ----------------------------------------------------------------------------------- Restoring to time 2026-04-01 08:23:34 ----------------------------------------------------------------------------------- + retries=0 + [[ 1775031767 -gt 1775031814 ]] + [[ 0 -gt 30 ]] ++ get_latest_oplog_chunk_ts some-name ++ local cluster=some-name +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ 
mktemp +++ local LAST_OUT=/tmp/tmp.4aFG6Fgij4 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.A9E1K61SHT +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.4aFG6Fgij4 +++ cat /tmp/tmp.A9E1K61SHT +++ rm /tmp/tmp.4aFG6Fgij4 /tmp/tmp.A9E1K61SHT +++ return 0 ++ echo 1775031767 + latest_ts=1775031767 + retries=1 ++ format_date 1775031767 ++ local timestamp=1775031767 +++ TZ=UTC +++ /usr/sbin/date -d@1775031767 '+%Y-%m-%d %H:%M:%S' ++ echo 2026-04-01 08:22:47 ++ format_date 1775031814 ++ local timestamp=1775031814 +++ TZ=UTC +++ /usr/sbin/date -d@1775031814 '+%Y-%m-%d %H:%M:%S' ++ echo 2026-04-01 08:23:34 + echo 'Waiting for last oplog chunk (2026-04-01 08:22:47) to be greater than restore target (2026-04-01 08:23:34)' Waiting for last oplog chunk (2026-04-01 08:22:47) to be greater than restore target (2026-04-01 08:23:34) + sleep 10 [retries 2-8 repeat the identical get_latest_oplog_chunk_ts poll every 10 seconds; the chunk end stays 1775031767 and the same wait message is printed each time] + [[ 1775031767 -gt 1775031814 ]] + [[ 8 -gt 30 ]] ++ get_latest_oplog_chunk_ts some-name ++ local cluster=some-name +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.cWBjZTIUyr ++++ mktemp +++ local LAST_ERR=/tmp/tmp.okTd5ypruR +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.cWBjZTIUyr
+++ cat /tmp/tmp.okTd5ypruR +++ rm /tmp/tmp.cWBjZTIUyr /tmp/tmp.okTd5ypruR +++ return 0 ++ echo 1775031895 + latest_ts=1775031895 + retries=9 ++ format_date 1775031895 ++ local timestamp=1775031895 +++ TZ=UTC +++ /usr/sbin/date -d@1775031895 '+%Y-%m-%d %H:%M:%S' ++ echo 2026-04-01 08:24:55 ++ format_date 1775031814 ++ local timestamp=1775031814 +++ TZ=UTC +++ /usr/sbin/date -d@1775031814 '+%Y-%m-%d %H:%M:%S' ++ echo 2026-04-01 08:23:34 + echo 'Waiting for last oplog chunk (2026-04-01 08:24:55) to be greater than restore target (2026-04-01 08:23:34)' Waiting for last oplog chunk (2026-04-01 08:24:55) to be greater than restore target (2026-04-01 08:23:34) + sleep 10 + [[ 1775031895 -gt 1775031814 ]] + '[' -z backupSource ']' + desc 'check restore by date backupSource' + set +o xtrace ----------------------------------------------------------------------------------- check restore by date backupSource ----------------------------------------------------------------------------------- ++ get_backup_dest backup-minio-1 ++ local backup_name=backup-minio-1 ++ kubectl_bin get psmdb-backup backup-minio-1 -o 'jsonpath={.status.destination}' ++ sed -e 's/.json$//' ++ sed 's|s3://||' +++ mktemp ++ sed 's|azure://||' ++ sed 's|gs://||' ++ local LAST_OUT=/tmp/tmp.txJK7hjsIP +++ mktemp ++ local LAST_ERR=/tmp/tmp.wG85zPZbJK ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb-backup backup-minio-1 -o 'jsonpath={.status.destination}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.txJK7hjsIP ++ cat /tmp/tmp.wG85zPZbJK ++ rm /tmp/tmp.txJK7hjsIP /tmp/tmp.wG85zPZbJK ++ return 0 + backup_dest=operator-testing/2026-04-01T08:20:20Z + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2256/e2e-tests/pitr-physical-backup-source/conf/restore.yml + /usr/sbin/sed -e 's/name:/name: restore-backup-minio-1/' + /usr/sbin/sed -e /backupName/d + /usr/sbin/sed -e 's/pitrType:/type: date/' + '[' -z 1775031814 ']' + /usr/sbin/sed -e 's|DESTINATION|operator-testing/2026-04-01T08:20:20Z|' ++ format_date 1775031814 ++ local timestamp=1775031814 + '[' -n '' ']' + yq +++ TZ=UTC +++ /usr/sbin/date -d@1775031814 '+%Y-%m-%d %H:%M:%S' ++ get_bucket_name backup-minio-1 + kubectl_bin apply -f - ++ local backup_name=backup-minio-1 ++ kubectl_bin get psmdb-backup backup-minio-1 -o 'jsonpath={.status.s3.bucket}' ++ mktemp +++ mktemp ++ echo 2026-04-01 08:23:34 + /usr/sbin/sed -e 's/date:/date: 2026-04-01 08:23:34/' + local LAST_OUT=/tmp/tmp.DKa3gfzZsg ++ local LAST_OUT=/tmp/tmp.69YZU8ag0H +++ mktemp ++ mktemp + local LAST_ERR=/tmp/tmp.cm2InS2thY + local exit_status=0 + local timeout=4 ++ local LAST_ERR=/tmp/tmp.BplnVA5Hpv ++ local exit_status=0 ++ local timeout=4 ++ seq 0 2 +++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb-backup backup-minio-1 -o 'jsonpath={.status.s3.bucket}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.69YZU8ag0H ++ cat /tmp/tmp.BplnVA5Hpv ++ rm /tmp/tmp.69YZU8ag0H /tmp/tmp.BplnVA5Hpv ++ return 0 + /usr/sbin/sed -e 's|BUCKET-NAME|operator-testing|' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.DKa3gfzZsg perconaservermongodbrestore.psmdb.percona.com/restore-backup-minio-1 created + cat /tmp/tmp.cm2InS2thY + rm /tmp/tmp.DKa3gfzZsg /tmp/tmp.cm2InS2thY + return 0 + wait_restore backup-minio-1 some-name requested 0 1200 + local backup_name=backup-minio-1 + local cluster_name=some-name + local 
target_state=requested + local wait_cluster_consistency=0 + local wait_time=1200 + local ok_if_ready=0 + set +o xtrace Waiting for the psmdb-restore/restore-backup-minio-1 object to be created.OK Waiting psmdb-restore/restore-backup-minio-1 to reach state "requested" .........OK after 8 minutes + [[ 0 -eq 1 ]] + echo + wait_restore backup-minio-1 some-name ready 0 1600 + local backup_name=backup-minio-1 + local cluster_name=some-name + local target_state=ready + local wait_cluster_consistency=0 + local wait_time=1600 + local ok_if_ready=0 + set +o xtrace Waiting for the psmdb-restore/restore-backup-minio-1 object to be created.OK Waiting psmdb-restore/restore-backup-minio-1 to reach state "ready" ...OK after 2 minutes + [[ 0 -eq 1 ]] + echo + set -o xtrace + wait_for_running some-name-rs0 3 + local name=some-name-rs0 + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=some-name ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod some-name-rs0-0 + local pod=some-name-rs0-0 + set +o xtrace waiting for pod/some-name-rs0-0 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod some-name-rs0-1 + local pod=some-name-rs0-1 + set +o xtrace waiting for pod/some-name-rs0-1 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.HeEOEmPXQw +++ mktemp ++ local LAST_ERR=/tmp/tmp.pYI9uxfqH6 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.HeEOEmPXQw ++ cat /tmp/tmp.pYI9uxfqH6 ++ rm /tmp/tmp.HeEOEmPXQw /tmp/tmp.pYI9uxfqH6 ++ return 0 + [[ '' == true ]] + wait_pod some-name-rs0-2 + local pod=some-name-rs0-2 + set +o xtrace waiting for pod/some-name-rs0-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.558lglMIRM +++ mktemp ++ local LAST_ERR=/tmp/tmp.4alCH0AsoL ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.558lglMIRM ++ cat /tmp/tmp.4alCH0AsoL ++ rm /tmp/tmp.558lglMIRM /tmp/tmp.4alCH0AsoL ++ return 0 + [[ '' == true ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.cDgiJot6Yf +++ mktemp ++ local LAST_ERR=/tmp/tmp.Ulc0q3jrZ8 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.cDgiJot6Yf ++ cat /tmp/tmp.Ulc0q3jrZ8 ++ rm /tmp/tmp.cDgiJot6Yf /tmp/tmp.Ulc0q3jrZ8 ++ return 0 + [[ '' == true ]] + sleep 10 + [[ true == true ]] + set +x Waiting for cluster readyness.................................................................... 
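check_recovery above first sits in a retry loop until the newest oplog chunk end passes the requested target, then renders the restore object instead of loading a static one: sed fills in the restore name, switches pitrType to date, injects the formatted target time, and, because this is the backupSource flavour, drops backupName and substitutes DESTINATION and BUCKET-NAME from the backup's status. A trimmed sketch of that rendering plus the two-stage wait (template path abbreviated; kubectl wait stands in for the harness's own polling loop):

  dest=$(kubectl get psmdb-backup backup-minio-1 -o 'jsonpath={.status.destination}' | sed 's|s3://||')
  target=$(TZ=UTC date -d @1775031814 '+%Y-%m-%d %H:%M:%S')

  sed -e 's/name:/name: restore-backup-minio-1/' \
      -e '/backupName/d' \
      -e 's/pitrType:/type: date/' \
      -e "s/date:/date: ${target}/" \
      -e "s|DESTINATION|${dest}|" \
      -e 's|BUCKET-NAME|operator-testing|' conf/restore.yml \
    | kubectl apply -f -

  # Physical PITR restores pass through "requested" before reaching "ready".
  kubectl wait psmdb-restore/restore-backup-minio-1 --for=jsonpath='{.status.state}'=requested --timeout=1200s
  kubectl wait psmdb-restore/restore-backup-minio-1 --for=jsonpath='{.status.state}'=ready --timeout=1600s

A physical restore bounces every mongod, which is why the trace then re-runs wait_for_running against rs0, cfg, and mongos before touching the data again.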
+ wait_for_running some-name-cfg 3 + local name=some-name-cfg + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=cfg + local cluster_name=some-name ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod some-name-cfg-0 + local pod=some-name-cfg-0 + set +o xtrace waiting for pod/some-name-cfg-0 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod some-name-cfg-1 + local pod=some-name-cfg-1 + set +o xtrace waiting for pod/some-name-cfg-1 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.tsK1SYXD6B +++ mktemp ++ local LAST_ERR=/tmp/tmp.mMPs7YdDgZ ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.tsK1SYXD6B ++ cat /tmp/tmp.mMPs7YdDgZ ++ rm /tmp/tmp.tsK1SYXD6B /tmp/tmp.mMPs7YdDgZ ++ return 0 + [[ '' == true ]] + wait_pod some-name-cfg-2 + local pod=some-name-cfg-2 + set +o xtrace waiting for pod/some-name-cfg-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ruA6br3OHi +++ mktemp ++ local LAST_ERR=/tmp/tmp.vQMu8F6IXW ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.ruA6br3OHi ++ cat /tmp/tmp.vQMu8F6IXW ++ rm /tmp/tmp.ruA6br3OHi /tmp/tmp.vQMu8F6IXW ++ return 0 + [[ '' == true ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.qH0YbIr2OW +++ mktemp ++ local LAST_ERR=/tmp/tmp.a1tHe1yoIT ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.qH0YbIr2OW ++ cat /tmp/tmp.a1tHe1yoIT ++ rm /tmp/tmp.qH0YbIr2OW /tmp/tmp.a1tHe1yoIT ++ return 0 + [[ '' == true ]] + sleep 10 + [[ true == true ]] + set +x Waiting for cluster readyness + wait_for_running some-name-mongos 3 + local name=some-name-mongos + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=mongos + local cluster_name=some-name ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod some-name-mongos-0 + local pod=some-name-mongos-0 + set +o xtrace waiting for pod/some-name-mongos-0 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod some-name-mongos-1 + local pod=some-name-mongos-1 + set +o xtrace waiting for pod/some-name-mongos-1 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.lYbxarSah1 +++ mktemp ++ local LAST_ERR=/tmp/tmp.s33FCn0W4H ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ 
'[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.lYbxarSah1 ++ cat /tmp/tmp.s33FCn0W4H ++ rm /tmp/tmp.lYbxarSah1 /tmp/tmp.s33FCn0W4H ++ return 0 + [[ '' == true ]] + wait_pod some-name-mongos-2 + local pod=some-name-mongos-2 + set +o xtrace waiting for pod/some-name-mongos-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.wNJpbDZMXX +++ mktemp ++ local LAST_ERR=/tmp/tmp.I79U8S4x2y ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.wNJpbDZMXX ++ cat /tmp/tmp.I79U8S4x2y ++ rm /tmp/tmp.wNJpbDZMXX /tmp/tmp.I79U8S4x2y ++ return 0 + [[ '' == true ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Oc24q6FKrP +++ mktemp ++ local LAST_ERR=/tmp/tmp.f6uNqBKn6t ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Oc24q6FKrP ++ cat /tmp/tmp.f6uNqBKn6t ++ rm /tmp/tmp.Oc24q6FKrP /tmp/tmp.f6uNqBKn6t ++ return 0 + [[ '' == true ]] + sleep 10 + [[ true == true ]] + set +x Waiting for cluster readyness + sleep 10 + compare_mongos_cmd find myApp:myPass@some-name-mongos.pitr-physical-backup-source-9554 '' + local command=find + local uri=myApp:myPass@some-name-mongos.pitr-physical-backup-source-9554 + local postfix= + local suffix= + local database=myApp + local collection=test + local port=27017 + local tls=false + [[ false == true ]] + mongos_command=run_mongos + log 'running db.test.command() in myApp' + set +o xtrace [2026-04-01T08:40:14+0000] running db.test.command() in myApp + run_mongos 'use myApp\n db.test.find()' myApp:myPass@some-name-mongos.pitr-physical-backup-source-9554 mongodb '' '' 27017 + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-mongos.pitr-physical-backup-source-9554 + local driver=mongodb + grep -E -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' + local suffix=.svc.cluster.local + local mongo_flag= + local port=27017 + local mongo_bin=mongo + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ echo .svc.cluster.local ++ awk -F: '{print $2}' + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.DmbHZFwvlg +++ mktemp ++ local LAST_ERR=/tmp/tmp.daVnvZNaCy ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.DmbHZFwvlg ++ cat /tmp/tmp.daVnvZNaCy ++ rm /tmp/tmp.DmbHZFwvlg /tmp/tmp.daVnvZNaCy ++ return 0 + local client_container=psmdb-client-bb8b97679-4p4nr + kubectl_bin exec psmdb-client-bb8b97679-4p4nr -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo 
mongodb://myApp:myPass@some-name-mongos.pitr-physical-backup-source-9554.svc.cluster.local:27017/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.1qZn7BK7QP ++ mktemp + local LAST_ERR=/tmp/tmp.alzFjG58aO + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-bb8b97679-4p4nr -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.pitr-physical-backup-source-9554.svc.cluster.local:27017/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.1qZn7BK7QP + cat /tmp/tmp.alzFjG58aO + rm /tmp/tmp.1qZn7BK7QP /tmp/tmp.alzFjG58aO + return 0 + [[ 0 -eq 0 ]] + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2256/e2e-tests/pitr-physical-backup-source/compare/find.json /tmp/tmp.qeMq7cb3Mw/find + desc 'delete PSMDB cluster some-name' + set +o xtrace ----------------------------------------------------------------------------------- delete PSMDB cluster some-name ----------------------------------------------------------------------------------- + kubectl_bin delete psmdb some-name ++ mktemp + local LAST_OUT=/tmp/tmp.ZBxxIRbHnS ++ mktemp + local LAST_ERR=/tmp/tmp.3dJXcd8Jy1 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete psmdb some-name + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.ZBxxIRbHnS perconaservermongodb.psmdb.percona.com "some-name" deleted from pitr-physical-backup-source-9554 namespace + cat /tmp/tmp.3dJXcd8Jy1 + rm /tmp/tmp.ZBxxIRbHnS /tmp/tmp.3dJXcd8Jy1 + return 0 + kubectl_bin delete pvc -l app.kubernetes.io/managed-by=percona-server-mongodb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.xoiIKVzEMG ++ mktemp + local LAST_ERR=/tmp/tmp.JDSesE9C1q + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete pvc -l app.kubernetes.io/managed-by=percona-server-mongodb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.xoiIKVzEMG persistentvolumeclaim "mongod-data-some-name-cfg-0" deleted from pitr-physical-backup-source-9554 namespace persistentvolumeclaim "mongod-data-some-name-cfg-1" deleted from pitr-physical-backup-source-9554 namespace persistentvolumeclaim "mongod-data-some-name-cfg-2" deleted from pitr-physical-backup-source-9554 namespace persistentvolumeclaim "mongod-data-some-name-rs0-0" deleted from pitr-physical-backup-source-9554 namespace persistentvolumeclaim "mongod-data-some-name-rs0-1" deleted from pitr-physical-backup-source-9554 namespace persistentvolumeclaim "mongod-data-some-name-rs0-2" deleted from pitr-physical-backup-source-9554 namespace persistentvolumeclaim "mongod-data-some-name-rs1-0" deleted from pitr-physical-backup-source-9554 namespace persistentvolumeclaim "mongod-data-some-name-rs1-1" deleted from pitr-physical-backup-source-9554 namespace persistentvolumeclaim "mongod-data-some-name-rs1-2" deleted from pitr-physical-backup-source-9554 namespace persistentvolumeclaim "mongod-data-some-name-rs2-0" deleted from pitr-physical-backup-source-9554 namespace persistentvolumeclaim "mongod-data-some-name-rs2-1" deleted from pitr-physical-backup-source-9554 namespace persistentvolumeclaim "mongod-data-some-name-rs2-2" deleted from pitr-physical-backup-source-9554 namespace + cat /tmp/tmp.JDSesE9C1q + rm /tmp/tmp.xoiIKVzEMG /tmp/tmp.JDSesE9C1q + return 0 + sleep 10 + desc 'recreate PSMDB cluster some-name' + set +o xtrace 
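Deleting the psmdb custom resource does not remove the data volumes, so the teardown also deletes the operator-labelled PVCs across all replsets (rs0, rs1, rs2, cfg), guaranteeing the recreated cluster starts from empty storage rather than stale datadirs. The cleanup reduced to its essentials:

  kubectl delete psmdb some-name
  # PVCs survive CR deletion; remove them via the operator's management label.
  kubectl delete pvc -l app.kubernetes.io/managed-by=percona-server-mongodb-operator
  sleep 10    # let finalizers settle before re-applying the cluster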
----------------------------------------------------------------------------------- recreate PSMDB cluster some-name ----------------------------------------------------------------------------------- + desc 'create second PSMDB cluster some-name' + set +o xtrace ----------------------------------------------------------------------------------- create second PSMDB cluster some-name ----------------------------------------------------------------------------------- + apply_cluster /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2256/e2e-tests/pitr-physical-backup-source/conf/some-name-rs0.yml + '[' -z '' ']' + cat_config /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2256/e2e-tests/pitr-physical-backup-source/conf/some-name-rs0.yml + kubectl_bin apply -f - + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2256/e2e-tests/pitr-physical-backup-source/conf/some-name-rs0.yml + yq eval '(.spec | select(.image == null)).image = "docker.io/perconalab/percona-server-mongodb-operator:main-mongod8.0"' ++ mktemp + yq eval '(.spec | select(has("pmm"))).pmm.image = "docker.io/percona/pmm-client:2.44.1-1"' + yq eval '(.spec | select(has("initImage"))).initImage = "docker.io/perconalab/percona-server-mongodb-operator:PR-2256-6d23a024"' + yq eval '(.spec | select(has("backup"))).backup.image = "docker.io/perconalab/percona-server-mongodb-operator:main-backup"' + /usr/sbin/sed -e s/NAME_SPACE/pitr-physical-backup-source-9554/g + yq eval '.spec.upgradeOptions.apply="Never"' + local LAST_OUT=/tmp/tmp.1Bk9ZtRt7g ++ mktemp + local LAST_ERR=/tmp/tmp.Uik5SPF9B8 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.1Bk9ZtRt7g perconaservermongodb.psmdb.percona.com/some-name created + cat /tmp/tmp.Uik5SPF9B8 + rm /tmp/tmp.1Bk9ZtRt7g /tmp/tmp.Uik5SPF9B8 + return 0 + desc 'check if all 3 Pods started' + set +o xtrace ----------------------------------------------------------------------------------- check if all 3 Pods started ----------------------------------------------------------------------------------- + wait_for_running some-name-rs0 3 + local name=some-name-rs0 + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=some-name ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod some-name-rs0-0 + local pod=some-name-rs0-0 + set +o xtrace waiting for pod/some-name-rs0-0 to be ready.......OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod some-name-rs0-1 + local pod=some-name-rs0-1 + set +o xtrace waiting for pod/some-name-rs0-1 to be ready.........OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.oSNrtoQguG +++ mktemp ++ local LAST_ERR=/tmp/tmp.4W7rbNcV56 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.oSNrtoQguG ++ cat /tmp/tmp.4W7rbNcV56 ++ rm /tmp/tmp.oSNrtoQguG /tmp/tmp.4W7rbNcV56 ++ return 0 + [[ '' == true ]] + wait_pod some-name-rs0-2 + local pod=some-name-rs0-2 + set +o xtrace waiting for pod/some-name-rs0-2 to be ready.......OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' 
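apply_cluster renders the cluster manifest on the fly: a chain of yq expressions pins the mongod, PMM, init, and backup images to the build under test, sed swaps the NAME_SPACE placeholder, and the result is piped straight to kubectl apply. The same pipeline trimmed to two of the overrides (relative path abbreviated; image tags are the ones this run used):

  cat conf/some-name-rs0.yml \
    | yq eval '(.spec | select(.image == null)).image = "docker.io/perconalab/percona-server-mongodb-operator:main-mongod8.0"' \
    | yq eval '(.spec | select(has("backup"))).backup.image = "docker.io/perconalab/percona-server-mongodb-operator:main-backup"' \
    | sed -e 's/NAME_SPACE/pitr-physical-backup-source-9554/g' \
    | kubectl apply -f -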
+++ mktemp ++ local LAST_OUT=/tmp/tmp.scGvIsYKv2 +++ mktemp ++ local LAST_ERR=/tmp/tmp.YVyMMHMX4k ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.scGvIsYKv2 ++ cat /tmp/tmp.YVyMMHMX4k ++ rm /tmp/tmp.scGvIsYKv2 /tmp/tmp.YVyMMHMX4k ++ return 0 + [[ '' == true ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Oiq03Q7oSz +++ mktemp ++ local LAST_ERR=/tmp/tmp.j6RDPgqlyE ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Oiq03Q7oSz ++ cat /tmp/tmp.j6RDPgqlyE ++ rm /tmp/tmp.Oiq03Q7oSz /tmp/tmp.j6RDPgqlyE ++ return 0 + [[ '' == true ]] + sleep 10 + [[ true == true ]] + set +x Waiting for cluster readyness.......................................................... + wait_for_running some-name-cfg 3 false + local name=some-name-cfg + let last_pod=2 + local check_cluster_readyness=false + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=cfg + local cluster_name=some-name ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod some-name-cfg-0 + local pod=some-name-cfg-0 + set +o xtrace waiting for pod/some-name-cfg-0 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod some-name-cfg-1 + local pod=some-name-cfg-1 + set +o xtrace waiting for pod/some-name-cfg-1 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.8aFJij1hRm +++ mktemp ++ local LAST_ERR=/tmp/tmp.x22xLF0RaE ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.8aFJij1hRm ++ cat /tmp/tmp.x22xLF0RaE ++ rm /tmp/tmp.8aFJij1hRm /tmp/tmp.x22xLF0RaE ++ return 0 + [[ '' == true ]] + wait_pod some-name-cfg-2 + local pod=some-name-cfg-2 + set +o xtrace waiting for pod/some-name-cfg-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.6TgvYh6JxB +++ mktemp ++ local LAST_ERR=/tmp/tmp.yAEf2vhLCf ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.6TgvYh6JxB ++ cat /tmp/tmp.yAEf2vhLCf ++ rm /tmp/tmp.6TgvYh6JxB /tmp/tmp.yAEf2vhLCf ++ return 0 + [[ '' == true ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.7VGfb3i3n0 +++ mktemp ++ local LAST_ERR=/tmp/tmp.RZvzhVfiQh ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.7VGfb3i3n0 ++ cat /tmp/tmp.RZvzhVfiQh 
++ rm /tmp/tmp.7VGfb3i3n0 /tmp/tmp.RZvzhVfiQh ++ return 0 + [[ '' == true ]] + sleep 10 + [[ false == true ]] + wait_for_running some-name-rs1 3 + local name=some-name-rs1 + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs1 + local cluster_name=some-name ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod some-name-rs1-0 + local pod=some-name-rs1-0 + set +o xtrace waiting for pod/some-name-rs1-0 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod some-name-rs1-1 + local pod=some-name-rs1-1 + set +o xtrace waiting for pod/some-name-rs1-1 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs1")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.A01Dd5eWJY +++ mktemp ++ local LAST_ERR=/tmp/tmp.hMpGZiWL7v ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs1")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.A01Dd5eWJY ++ cat /tmp/tmp.hMpGZiWL7v ++ rm /tmp/tmp.A01Dd5eWJY /tmp/tmp.hMpGZiWL7v ++ return 0 + [[ '' == true ]] + wait_pod some-name-rs1-2 + local pod=some-name-rs1-2 + set +o xtrace waiting for pod/some-name-rs1-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs1")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.CJFXM05pyl +++ mktemp ++ local LAST_ERR=/tmp/tmp.JAJAVSi84O ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs1")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.CJFXM05pyl ++ cat /tmp/tmp.JAJAVSi84O ++ rm /tmp/tmp.CJFXM05pyl /tmp/tmp.JAJAVSi84O ++ return 0 + [[ '' == true ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs1")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.OgH15bMvwV +++ mktemp ++ local LAST_ERR=/tmp/tmp.fW5rheqITA ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs1")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.OgH15bMvwV ++ cat /tmp/tmp.fW5rheqITA ++ rm /tmp/tmp.OgH15bMvwV /tmp/tmp.fW5rheqITA ++ return 0 + [[ '' == true ]] + sleep 10 + [[ true == true ]] + set +x Waiting for cluster readyness + wait_for_running some-name-rs2 3 + local name=some-name-rs2 + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs2 + local cluster_name=some-name ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod some-name-rs2-0 + local pod=some-name-rs2-0 + set +o xtrace waiting for pod/some-name-rs2-0 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod some-name-rs2-1 + local pod=some-name-rs2-1 + set +o xtrace waiting for pod/some-name-rs2-1 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs2")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.xkQIQlH6nj +++ mktemp ++ local LAST_ERR=/tmp/tmp.Qdf44BjpJh ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 
'jsonpath={.spec.replsets[?(@.name=="rs2")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.xkQIQlH6nj ++ cat /tmp/tmp.Qdf44BjpJh ++ rm /tmp/tmp.xkQIQlH6nj /tmp/tmp.Qdf44BjpJh ++ return 0 + [[ '' == true ]] + wait_pod some-name-rs2-2 + local pod=some-name-rs2-2 + set +o xtrace waiting for pod/some-name-rs2-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs2")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Ndf6gEvFA3 +++ mktemp ++ local LAST_ERR=/tmp/tmp.DeL3svssA6 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs2")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Ndf6gEvFA3 ++ cat /tmp/tmp.DeL3svssA6 ++ rm /tmp/tmp.Ndf6gEvFA3 /tmp/tmp.DeL3svssA6 ++ return 0 + [[ '' == true ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs2")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.AzmGYILTaP +++ mktemp ++ local LAST_ERR=/tmp/tmp.ArFyFqCow3 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs2")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.AzmGYILTaP ++ cat /tmp/tmp.ArFyFqCow3 ++ rm /tmp/tmp.AzmGYILTaP /tmp/tmp.ArFyFqCow3 ++ return 0 + [[ '' == true ]] + sleep 10 + [[ true == true ]] + set +x Waiting for cluster readyness + sleep 10 + write_initial_data + desc 'create user myApp' + set +o xtrace ----------------------------------------------------------------------------------- create user myApp ----------------------------------------------------------------------------------- + run_mongos 'db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})' userAdmin:userAdmin123456@some-name-mongos.pitr-physical-backup-source-9554 + local 'command=db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})' + local uri=userAdmin:userAdmin123456@some-name-mongos.pitr-physical-backup-source-9554 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local port=27017 + local mongo_bin=mongo ++ echo .svc.cluster.local ++ awk -F: '{print $2}' + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.eDD4QeKKDy +++ mktemp ++ local LAST_ERR=/tmp/tmp.No8cQaYupn ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.eDD4QeKKDy ++ cat /tmp/tmp.No8cQaYupn ++ rm /tmp/tmp.eDD4QeKKDy /tmp/tmp.No8cQaYupn ++ return 0 + local client_container=psmdb-client-bb8b97679-4p4nr + kubectl_bin exec psmdb-client-bb8b97679-4p4nr -- bash -c 'printf '\''db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})\n'\'' | mongo mongodb://userAdmin:userAdmin123456@some-name-mongos.pitr-physical-backup-source-9554.svc.cluster.local:27017/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.qObrFumQ8E ++ mktemp + local LAST_ERR=/tmp/tmp.Z31UoCSigA + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-bb8b97679-4p4nr 
-- bash -c 'printf '\''db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})\n'\'' | mongo mongodb://userAdmin:userAdmin123456@some-name-mongos.pitr-physical-backup-source-9554.svc.cluster.local:27017/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.qObrFumQ8E Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-mongos.pitr-physical-backup-source-9554.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb Implicit session: session { "id" : UUID("42606f26-63e0-4658-8dcf-e7caf7da45d6") } Percona Server for MongoDB server version: v8.0.19-7 WARNING: shell and server versions do not match Successfully added user: { "user" : "myApp", "roles" : [ { "db" : "myApp", "role" : "readWrite" } ] } bye + cat /tmp/tmp.Z31UoCSigA + rm /tmp/tmp.qObrFumQ8E /tmp/tmp.Z31UoCSigA + return 0 + sleep 2 + write_document + local cmp_postfix= + local sleep_value=0 + log 'write initial data, read from all' + set +o xtrace [2026-04-01T08:45:20+0000] write initial data, read from all + run_mongos 'use myApp\n db.test.insert({ x: 100500 })' myApp:myPass@some-name-mongos.pitr-physical-backup-source-9554 + local 'command=use myApp\n db.test.insert({ x: 100500 })' + local uri=myApp:myPass@some-name-mongos.pitr-physical-backup-source-9554 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local port=27017 + local mongo_bin=mongo ++ echo .svc.cluster.local ++ awk -F: '{print $2}' + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.pmzMSiGnyr +++ mktemp ++ local LAST_ERR=/tmp/tmp.f44QNiiCBG ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.pmzMSiGnyr ++ cat /tmp/tmp.f44QNiiCBG ++ rm /tmp/tmp.pmzMSiGnyr /tmp/tmp.f44QNiiCBG ++ return 0 + local client_container=psmdb-client-bb8b97679-4p4nr + kubectl_bin exec psmdb-client-bb8b97679-4p4nr -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.pitr-physical-backup-source-9554.svc.cluster.local:27017/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.jeqB49GtuK ++ mktemp + local LAST_ERR=/tmp/tmp.8S1PhoGOOF + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-bb8b97679-4p4nr -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.pitr-physical-backup-source-9554.svc.cluster.local:27017/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.jeqB49GtuK Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-mongos.pitr-physical-backup-source-9554.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb Implicit session: session { "id" : UUID("ab2d8b81-3dd3-4d1b-84cb-e86391451441") } Percona Server for MongoDB server version: v8.0.19-7 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.8S1PhoGOOF + rm /tmp/tmp.jeqB49GtuK /tmp/tmp.8S1PhoGOOF + return 0 + sleep 0 + compare_mongos_cmd find myApp:myPass@some-name-mongos.pitr-physical-backup-source-9554 + local command=find + local 
uri=myApp:myPass@some-name-mongos.pitr-physical-backup-source-9554 + local postfix= + local suffix= + local database=myApp + local collection=test + local port=27017 + local tls=false + [[ false == true ]] + mongos_command=run_mongos + log 'running db.test.command() in myApp' + set +o xtrace [2026-04-01T08:45:22+0000] running db.test.command() in myApp + run_mongos 'use myApp\n db.test.find()' myApp:myPass@some-name-mongos.pitr-physical-backup-source-9554 mongodb '' '' 27017 + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-mongos.pitr-physical-backup-source-9554 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + grep -E -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' + local port=27017 + local mongo_bin=mongo + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ echo .svc.cluster.local ++ awk -F: '{print $2}' + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.yCWfWZDj5l +++ mktemp ++ local LAST_ERR=/tmp/tmp.2Xk0OO1PI8 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.yCWfWZDj5l ++ cat /tmp/tmp.2Xk0OO1PI8 ++ rm /tmp/tmp.yCWfWZDj5l /tmp/tmp.2Xk0OO1PI8 ++ return 0 + local client_container=psmdb-client-bb8b97679-4p4nr + kubectl_bin exec psmdb-client-bb8b97679-4p4nr -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.pitr-physical-backup-source-9554.svc.cluster.local:27017/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.OQKJN1A8a0 ++ mktemp + local LAST_ERR=/tmp/tmp.AubLCCpKys + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-bb8b97679-4p4nr -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.pitr-physical-backup-source-9554.svc.cluster.local:27017/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.OQKJN1A8a0 + cat /tmp/tmp.AubLCCpKys + rm /tmp/tmp.OQKJN1A8a0 /tmp/tmp.AubLCCpKys + return 0 + [[ 0 -eq 0 ]] + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2256/e2e-tests/pitr-physical-backup-source/compare/find.json /tmp/tmp.qeMq7cb3Mw/find + desc 'restore pitr type latest using backupSource' + set +o xtrace ----------------------------------------------------------------------------------- restore pitr type latest using backupSource ----------------------------------------------------------------------------------- + write_document -2nd + local cmp_postfix=-2nd + local sleep_value=0 + log 'write initial data, read from all' + set +o xtrace [2026-04-01T08:45:25+0000] write initial data, read from all + run_mongos 'use myApp\n db.test.insert({ x: 100500 })' myApp:myPass@some-name-mongos.pitr-physical-backup-source-9554 + local 'command=use myApp\n db.test.insert({ x: 100500 })' + local uri=myApp:myPass@some-name-mongos.pitr-physical-backup-source-9554 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local port=27017 + local mongo_bin=mongo ++ echo .svc.cluster.local ++ 
awk -F: '{print $2}' + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.73byTkfxAp +++ mktemp ++ local LAST_ERR=/tmp/tmp.jIxfGQH8lm ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.73byTkfxAp ++ cat /tmp/tmp.jIxfGQH8lm ++ rm /tmp/tmp.73byTkfxAp /tmp/tmp.jIxfGQH8lm ++ return 0 + local client_container=psmdb-client-bb8b97679-4p4nr + kubectl_bin exec psmdb-client-bb8b97679-4p4nr -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.pitr-physical-backup-source-9554.svc.cluster.local:27017/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.NjKuhnujI6 ++ mktemp + local LAST_ERR=/tmp/tmp.zNt0dI2SPz + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-bb8b97679-4p4nr -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.pitr-physical-backup-source-9554.svc.cluster.local:27017/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.NjKuhnujI6 Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-mongos.pitr-physical-backup-source-9554.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb Implicit session: session { "id" : UUID("6e023396-a3d6-4a52-8a68-7dcb5c08e261") } Percona Server for MongoDB server version: v8.0.19-7 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.zNt0dI2SPz + rm /tmp/tmp.NjKuhnujI6 /tmp/tmp.zNt0dI2SPz + return 0 + sleep 0 + compare_mongos_cmd find myApp:myPass@some-name-mongos.pitr-physical-backup-source-9554 -2nd + local command=find + local uri=myApp:myPass@some-name-mongos.pitr-physical-backup-source-9554 + local postfix=-2nd + local suffix= + local database=myApp + local collection=test + local port=27017 + local tls=false + [[ false == true ]] + mongos_command=run_mongos + log 'running db.test.command() in myApp' + set +o xtrace [2026-04-01T08:45:27+0000] running db.test.command() in myApp + run_mongos 'use myApp\n db.test.find()' myApp:myPass@some-name-mongos.pitr-physical-backup-source-9554 mongodb '' '' 27017 + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-mongos.pitr-physical-backup-source-9554 + local driver=mongodb + local suffix=.svc.cluster.local + grep -E -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' + local mongo_flag= + local port=27017 + local mongo_bin=mongo + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ echo .svc.cluster.local ++ awk -F: '{print $2}' + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.9UGahry47G +++ mktemp ++ local LAST_ERR=/tmp/tmp.medTlEz7Za ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 
'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.9UGahry47G ++ cat /tmp/tmp.medTlEz7Za ++ rm /tmp/tmp.9UGahry47G /tmp/tmp.medTlEz7Za ++ return 0 + local client_container=psmdb-client-bb8b97679-4p4nr + kubectl_bin exec psmdb-client-bb8b97679-4p4nr -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.pitr-physical-backup-source-9554.svc.cluster.local:27017/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.Xgzf05eowM ++ mktemp + local LAST_ERR=/tmp/tmp.KPsC2uJcXI + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-bb8b97679-4p4nr -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.pitr-physical-backup-source-9554.svc.cluster.local:27017/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Xgzf05eowM + cat /tmp/tmp.KPsC2uJcXI + rm /tmp/tmp.Xgzf05eowM /tmp/tmp.KPsC2uJcXI + return 0 + [[ 0 -eq 0 ]] + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2256/e2e-tests/pitr-physical-backup-source/compare/find-2nd.json /tmp/tmp.qeMq7cb3Mw/find-2nd + run_backup backup-minio 2 physical + local name=backup-minio + local idx=2 + local type=physical + desc 'run backup backup-minio-2' + set +o xtrace ----------------------------------------------------------------------------------- run backup backup-minio-2 ----------------------------------------------------------------------------------- + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2256/e2e-tests/pitr-physical-backup-source/conf/backup-minio.yml + /usr/sbin/sed -e 's/name:/name: backup-minio-2/' + /usr/sbin/sed -e 's/type:/type: physical/' + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.gFdnKxBs7I ++ mktemp + local LAST_ERR=/tmp/tmp.y9J1P7IjZe + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.gFdnKxBs7I perconaservermongodbbackup.psmdb.percona.com/backup-minio-2 created + cat /tmp/tmp.y9J1P7IjZe + rm /tmp/tmp.gFdnKxBs7I /tmp/tmp.y9J1P7IjZe + return 0 + wait_backup backup-minio-2 + local backup_name=backup-minio-2 + local target_state=ready + set +o xtrace waiting for backup-minio-2 to reach ready state..........OK + compare_latest_restorable_time some-name-rs0 backup-minio-2 + local cluster=some-name-rs0 + local backup_name=backup-minio-2 + local latest_restorable_time + local backup_time ++ get_latest_restorable_time some-name-rs0 ++ local cluster=some-name-rs0 ++ local first_timestamp ++ local second_timestamp ++ local retry=0 ++ [[ '' != '' ]] +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.sbgpevnrXC ++++ mktemp +++ local LAST_ERR=/tmp/tmp.watJPCgmDU +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.sbgpevnrXC +++ cat /tmp/tmp.watJPCgmDU +++ rm /tmp/tmp.sbgpevnrXC /tmp/tmp.watJPCgmDU +++ return 0 ++ first_timestamp=1775031953 ++ sleep 5 ++ [[ 1775031953 != '' ]] ++ [[ 1775031953 != null ]] +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp 
+++ local LAST_OUT=/tmp/tmp.CsjouzTpzB ++++ mktemp +++ local LAST_ERR=/tmp/tmp.hlkPbsR3Ov +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.CsjouzTpzB +++ cat /tmp/tmp.hlkPbsR3Ov +++ rm /tmp/tmp.CsjouzTpzB /tmp/tmp.hlkPbsR3Ov +++ return 0 ++ second_timestamp=1775031953 ++ let retry+=1 ++ [[ 1 -gt 30 ]] ++ [[ 1775031953 != '' ]] ++ [[ 1775031953 != null ]] ++ [[ 1775031953 == 1775031953 ]] ++ /usr/sbin/date -u -d @1775031953 +%Y-%m-%dT%H:%M:%SZ + latest_restorable_time=2026-04-01T08:25:53Z ++ get_latest_restorable_time_from_backup_object backup-minio-2 ++ local backup_name=backup-minio-2 ++ local latestRestorableTime ++ local retry=0 ++ [[ '' != '' ]] ++ sleep 5 +++ kubectl_bin get psmdb-backup backup-minio-2 -o 'jsonpath={.status.latestRestorableTime}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.S5Br6ZJ7Pf ++++ mktemp +++ local LAST_ERR=/tmp/tmp.wxDBpFIjGj +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get psmdb-backup backup-minio-2 -o 'jsonpath={.status.latestRestorableTime}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.S5Br6ZJ7Pf +++ cat /tmp/tmp.wxDBpFIjGj +++ rm /tmp/tmp.S5Br6ZJ7Pf /tmp/tmp.wxDBpFIjGj +++ return 0 ++ latestRestorableTime=2026-04-01T08:25:53Z ++ let retry+=1 ++ [[ 1 -gt 30 ]] ++ [[ 2026-04-01T08:25:53Z != '' ]] ++ [[ 2026-04-01T08:25:53Z != null ]] ++ echo 2026-04-01T08:25:53Z + backup_time=2026-04-01T08:25:53Z + [[ 2026-04-01T08:25:53Z != 2026\-04\-01T08\:25\:53Z ]] + check_recovery backup-minio-2 latest '' -3rd some-name backupSource + local backup_name=backup-minio-2 + local restore_type=latest + local restore_date= + local cmp_postfix=-3rd + local cluster_name=some-name + local backupSource=backupSource ++ get_latest_oplog_chunk_ts some-name ++ local cluster=some-name +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.ZTikUmz9gp ++++ mktemp +++ local LAST_ERR=/tmp/tmp.SbRK4jRnfB +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.ZTikUmz9gp +++ cat /tmp/tmp.SbRK4jRnfB +++ rm /tmp/tmp.ZTikUmz9gp /tmp/tmp.SbRK4jRnfB +++ return 0 ++ echo 1775031953 + local latest_ts=1775031953 + desc 'write more data before restore by latest' + set +o xtrace ----------------------------------------------------------------------------------- write more data before restore by latest ----------------------------------------------------------------------------------- + run_mongos 'use myApp\n db.test.insert({ x: 100501 })' myApp:myPass@some-name-mongos.pitr-physical-backup-source-9554 + local 'command=use myApp\n db.test.insert({ x: 100501 })' + local uri=myApp:myPass@some-name-mongos.pitr-physical-backup-source-9554 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local port=27017 + local mongo_bin=mongo ++ echo .svc.cluster.local ++ awk -F: '{print $2}' + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.5IqmlzCwx3 +++ 
mktemp ++ local LAST_ERR=/tmp/tmp.tkjY3dAjdd ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.5IqmlzCwx3 ++ cat /tmp/tmp.tkjY3dAjdd ++ rm /tmp/tmp.5IqmlzCwx3 /tmp/tmp.tkjY3dAjdd ++ return 0 + local client_container=psmdb-client-bb8b97679-4p4nr + kubectl_bin exec psmdb-client-bb8b97679-4p4nr -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100501 })\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.pitr-physical-backup-source-9554.svc.cluster.local:27017/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.W5BSr0gu1F ++ mktemp + local LAST_ERR=/tmp/tmp.jYJBE00Ix1 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-bb8b97679-4p4nr -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100501 })\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.pitr-physical-backup-source-9554.svc.cluster.local:27017/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.W5BSr0gu1F Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-mongos.pitr-physical-backup-source-9554.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb Implicit session: session { "id" : UUID("a85675a5-65b5-4719-8cc3-add718def8ab") } Percona Server for MongoDB server version: v8.0.19-7 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.jYJBE00Ix1 + rm /tmp/tmp.W5BSr0gu1F /tmp/tmp.jYJBE00Ix1 + return 0 + [[ -n '' ]] + desc 'Restoring to latest' + set +o xtrace ----------------------------------------------------------------------------------- Restoring to latest ----------------------------------------------------------------------------------- ++ get_latest_oplog_chunk_ts some-name ++ local cluster=some-name +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.rD0xhyOE4k ++++ mktemp +++ local LAST_ERR=/tmp/tmp.xG8XL5nYJw +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.rD0xhyOE4k +++ cat /tmp/tmp.xG8XL5nYJw +++ rm /tmp/tmp.rD0xhyOE4k /tmp/tmp.xG8XL5nYJw +++ return 0 ++ echo 1775031953 + local current_ts=1775031953 + retries=0 + [[ 1775031953 -gt 1775031953 ]] + [[ 0 -gt 30 ]] ++ get_latest_oplog_chunk_ts some-name ++ local cluster=some-name +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.kfFeVtDCO6 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.Fvx6nVXJh4 +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.kfFeVtDCO6 +++ cat /tmp/tmp.Fvx6nVXJh4 +++ rm /tmp/tmp.kfFeVtDCO6 /tmp/tmp.Fvx6nVXJh4 +++ return 0 ++ echo 1775031953 + latest_ts=1775031953 + retries=1 ++ format_date 1775031953 ++ local timestamp=1775031953 +++ TZ=UTC +++ /usr/sbin/date -d@1775031953 '+%Y-%m-%d %H:%M:%S' ++ echo 2026-04-01 
08:25:53 ++ format_date 1775031953 ++ local timestamp=1775031953 +++ TZ=UTC +++ /usr/sbin/date -d@1775031953 '+%Y-%m-%d %H:%M:%S' ++ echo 2026-04-01 08:25:53 + echo 'Waiting for last oplog chunk (2026-04-01 08:25:53) to be 120 seconds older than starting chunk (2026-04-01 08:25:53)' Waiting for last oplog chunk (2026-04-01 08:25:53) to be 120 seconds older than starting chunk (2026-04-01 08:25:53) + sleep 10
[... ten near-identical wait cycles (retries 2 through 11) elided: each iteration re-checks [[ 1775031953 -gt 1775031953 ]], sleeps 10 seconds, re-runs `kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json`, extracts '.backups.pitrChunks.pitrChunks | last | .range.end' with jq, and gets the unchanged chunk timestamp 1775031953 (2026-04-01 08:25:53) ...]
+ [[ 1775031953 -gt 1775031953 ]] + [[ 11 -gt 30 ]] ++ get_latest_oplog_chunk_ts some-name ++ local cluster=some-name +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.wAeecAStrV ++++ mktemp +++ local LAST_ERR=/tmp/tmp.yY8ds0f530 +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.wAeecAStrV +++ cat /tmp/tmp.yY8ds0f530 +++ rm /tmp/tmp.wAeecAStrV /tmp/tmp.yY8ds0f530 +++ return 0 ++ echo 1775033282
+ latest_ts=1775033282 + retries=12 ++ format_date 1775033282 ++ local timestamp=1775033282 +++ TZ=UTC +++ /usr/sbin/date -d@1775033282 '+%Y-%m-%d %H:%M:%S' ++ echo 2026-04-01 08:48:02 ++ format_date 1775031953 ++ local timestamp=1775031953 +++ TZ=UTC +++ /usr/sbin/date -d@1775031953 '+%Y-%m-%d %H:%M:%S' ++ echo 2026-04-01 08:25:53 + echo 'Waiting for last oplog chunk (2026-04-01 08:48:02) to be 120 seconds older than starting chunk (2026-04-01 08:25:53)' Waiting for last oplog chunk (2026-04-01 08:48:02) to be 120 seconds older than starting chunk (2026-04-01 08:25:53) + sleep 10 + [[ 1775033282 -gt 1775031953 ]] + '[' -z backupSource ']' + desc 'check restore by latest backupSource' + set +o xtrace ----------------------------------------------------------------------------------- check restore by latest backupSource ----------------------------------------------------------------------------------- ++ get_backup_dest backup-minio-2 ++ local backup_name=backup-minio-2 ++ kubectl_bin get psmdb-backup backup-minio-2 -o 'jsonpath={.status.destination}' ++ sed -e 's/.json$//' ++ sed 's|s3://||' ++ sed 's|azure://||' ++ sed 's|gs://||' +++ mktemp ++ local LAST_OUT=/tmp/tmp.5uxxtIVhmU +++ mktemp ++ local LAST_ERR=/tmp/tmp.c3AISpzi5v ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb-backup backup-minio-2 -o 'jsonpath={.status.destination}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.5uxxtIVhmU ++ cat /tmp/tmp.c3AISpzi5v ++ rm /tmp/tmp.5uxxtIVhmU /tmp/tmp.c3AISpzi5v ++ return 0 + backup_dest=operator-testing/2026-04-01T08:45:31Z + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2256/e2e-tests/pitr-physical-backup-source/conf/restore.yml + /usr/sbin/sed -e 's/name:/name: restore-backup-minio-2/' + /usr/sbin/sed -e /backupName/d + /usr/sbin/sed -e 's/pitrType:/type: latest/' + '[' -z '' ']' + /usr/sbin/sed -e /date:/d + /usr/sbin/sed -e 's|DESTINATION|operator-testing/2026-04-01T08:45:31Z|' + kubectl_bin apply -f - + '[' -n '' ']' + yq ++ mktemp ++ get_bucket_name backup-minio-2 ++ local backup_name=backup-minio-2 ++ kubectl_bin get psmdb-backup backup-minio-2 -o 'jsonpath={.status.s3.bucket}' +++ mktemp + local LAST_OUT=/tmp/tmp.eElJ7ZwrkS ++ mktemp ++ local LAST_OUT=/tmp/tmp.707d6gURSZ +++ mktemp + local LAST_ERR=/tmp/tmp.VqCuRKTZ7h + local exit_status=0 + local timeout=4 ++ seq 0 2 ++ local LAST_ERR=/tmp/tmp.DKfM2c21XD ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb-backup backup-minio-2 -o 'jsonpath={.status.s3.bucket}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.707d6gURSZ ++ cat /tmp/tmp.DKfM2c21XD ++ rm /tmp/tmp.707d6gURSZ /tmp/tmp.DKfM2c21XD ++ return 0 + /usr/sbin/sed -e 's|BUCKET-NAME|operator-testing|' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.eElJ7ZwrkS perconaservermongodbrestore.psmdb.percona.com/restore-backup-minio-2 created + cat /tmp/tmp.VqCuRKTZ7h + rm /tmp/tmp.eElJ7ZwrkS /tmp/tmp.VqCuRKTZ7h + return 0 + wait_restore backup-minio-2 some-name requested 0 1200 + local backup_name=backup-minio-2 + local cluster_name=some-name + local target_state=requested + local wait_cluster_consistency=0 + local wait_time=1200 + local ok_if_ready=0 + set +o xtrace Waiting for the psmdb-restore/restore-backup-minio-2 object to be created.OK Waiting psmdb-restore/restore-backup-minio-2 to reach state "requested" .........OK 
after 8 minutes + [[ 0 -eq 1 ]] + echo + wait_restore backup-minio-2 some-name ready 0 1600 + local backup_name=backup-minio-2 + local cluster_name=some-name + local target_state=ready + local wait_cluster_consistency=0 + local wait_time=1600 + local ok_if_ready=0 + set +o xtrace Waiting for the psmdb-restore/restore-backup-minio-2 object to be created.OK Waiting psmdb-restore/restore-backup-minio-2 to reach state "ready" ...OK after 2 minutes + [[ 0 -eq 1 ]] + echo + set -o xtrace + wait_for_running some-name-rs0 3 + local name=some-name-rs0 + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=some-name ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod some-name-rs0-0 + local pod=some-name-rs0-0 + set +o xtrace waiting for pod/some-name-rs0-0 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod some-name-rs0-1 + local pod=some-name-rs0-1 + set +o xtrace waiting for pod/some-name-rs0-1 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.q2xhZ6iu18 +++ mktemp ++ local LAST_ERR=/tmp/tmp.RK9o0YNRDQ ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.q2xhZ6iu18 ++ cat /tmp/tmp.RK9o0YNRDQ ++ rm /tmp/tmp.q2xhZ6iu18 /tmp/tmp.RK9o0YNRDQ ++ return 0 + [[ '' == true ]] + wait_pod some-name-rs0-2 + local pod=some-name-rs0-2 + set +o xtrace waiting for pod/some-name-rs0-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.iaX6Qhec7H +++ mktemp ++ local LAST_ERR=/tmp/tmp.JNK7rUJ3VN ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.iaX6Qhec7H ++ cat /tmp/tmp.JNK7rUJ3VN ++ rm /tmp/tmp.iaX6Qhec7H /tmp/tmp.JNK7rUJ3VN ++ return 0 + [[ '' == true ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.IFYmGsJKir +++ mktemp ++ local LAST_ERR=/tmp/tmp.hpqO6k0vpO ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.IFYmGsJKir ++ cat /tmp/tmp.hpqO6k0vpO ++ rm /tmp/tmp.IFYmGsJKir /tmp/tmp.hpqO6k0vpO ++ return 0 + [[ '' == true ]] + sleep 10 + [[ true == true ]] + set +x Waiting for cluster readyness...................................................................... 
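
Note on the restore flow above: a physical PITR restore is waited through two target states on the PerconaServerMongoDBRestore object — "requested" first, then "ready" — and afterwards every component (rs0, cfg, mongos) is restarted and re-checked pod by pod, which is why the cluster-readiness wait runs so long here. A minimal sketch of the polling idiom, assuming the operator's .status.state field on the psmdb-restore object; the helper name, loop shape, and 5-second interval are illustrative, not the test's actual wait_restore:

    # Hypothetical helper: poll a psmdb-restore object until it reaches a state.
    wait_restore_state() {
        local restore=$1 want=$2 deadline=${3:-1200} elapsed=0
        until [[ "$(kubectl get psmdb-restore "$restore" -o jsonpath='{.status.state}')" == "$want" ]]; do
            sleep 5
            elapsed=$((elapsed + 5))
            # give up once the deadline is exceeded
            [[ $elapsed -ge $deadline ]] && { echo "timed out waiting for state $want" >&2; return 1; }
        done
    }
    # usage: wait_restore_state restore-backup-minio-2 requested 1200
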
+ wait_for_running some-name-cfg 3 + local name=some-name-cfg + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=cfg + local cluster_name=some-name ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod some-name-cfg-0 + local pod=some-name-cfg-0 + set +o xtrace waiting for pod/some-name-cfg-0 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod some-name-cfg-1 + local pod=some-name-cfg-1 + set +o xtrace waiting for pod/some-name-cfg-1 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.QnDaXMI2iB +++ mktemp ++ local LAST_ERR=/tmp/tmp.lwNQqtJ8NC ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.QnDaXMI2iB ++ cat /tmp/tmp.lwNQqtJ8NC ++ rm /tmp/tmp.QnDaXMI2iB /tmp/tmp.lwNQqtJ8NC ++ return 0 + [[ '' == true ]] + wait_pod some-name-cfg-2 + local pod=some-name-cfg-2 + set +o xtrace waiting for pod/some-name-cfg-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.cZHh1oM7jq +++ mktemp ++ local LAST_ERR=/tmp/tmp.aia9xvFuKB ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.cZHh1oM7jq ++ cat /tmp/tmp.aia9xvFuKB ++ rm /tmp/tmp.cZHh1oM7jq /tmp/tmp.aia9xvFuKB ++ return 0 + [[ '' == true ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.fN24JTzVtT +++ mktemp ++ local LAST_ERR=/tmp/tmp.vYe06HZDdy ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.fN24JTzVtT ++ cat /tmp/tmp.vYe06HZDdy ++ rm /tmp/tmp.fN24JTzVtT /tmp/tmp.vYe06HZDdy ++ return 0 + [[ '' == true ]] + sleep 10 + [[ true == true ]] + set +x Waiting for cluster readyness + wait_for_running some-name-mongos 3 + local name=some-name-mongos + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=mongos + local cluster_name=some-name ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod some-name-mongos-0 + local pod=some-name-mongos-0 + set +o xtrace waiting for pod/some-name-mongos-0 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod some-name-mongos-1 + local pod=some-name-mongos-1 + set +o xtrace waiting for pod/some-name-mongos-1 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.qPeEGhQxhu +++ mktemp ++ local LAST_ERR=/tmp/tmp.XymL3prRU7 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ 
'[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.qPeEGhQxhu ++ cat /tmp/tmp.XymL3prRU7 ++ rm /tmp/tmp.qPeEGhQxhu /tmp/tmp.XymL3prRU7 ++ return 0 + [[ '' == true ]] + wait_pod some-name-mongos-2 + local pod=some-name-mongos-2 + set +o xtrace waiting for pod/some-name-mongos-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.zwxaVBx2TL +++ mktemp ++ local LAST_ERR=/tmp/tmp.8r6qZc3Gbb ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.zwxaVBx2TL ++ cat /tmp/tmp.8r6qZc3Gbb ++ rm /tmp/tmp.zwxaVBx2TL /tmp/tmp.8r6qZc3Gbb ++ return 0 + [[ '' == true ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.3iOP8kdfGy +++ mktemp ++ local LAST_ERR=/tmp/tmp.SVH8zOjwV7 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.3iOP8kdfGy ++ cat /tmp/tmp.SVH8zOjwV7 ++ rm /tmp/tmp.3iOP8kdfGy /tmp/tmp.SVH8zOjwV7 ++ return 0 + [[ '' == true ]] + sleep 10 + [[ true == true ]] + set +x Waiting for cluster readyness + sleep 10 + compare_mongos_cmd find myApp:myPass@some-name-mongos.pitr-physical-backup-source-9554 -3rd + local command=find + local uri=myApp:myPass@some-name-mongos.pitr-physical-backup-source-9554 + local postfix=-3rd + local suffix= + local database=myApp + local collection=test + local port=27017 + local tls=false + [[ false == true ]] + mongos_command=run_mongos + log 'running db.test.command() in myApp' + set +o xtrace [2026-04-01T09:03:30+0000] running db.test.command() in myApp + run_mongos 'use myApp\n db.test.find()' myApp:myPass@some-name-mongos.pitr-physical-backup-source-9554 mongodb '' '' 27017 + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-mongos.pitr-physical-backup-source-9554 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local port=27017 + grep -E -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' + local mongo_bin=mongo + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ echo .svc.cluster.local ++ awk -F: '{print $2}' + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.EHAJcRLN5b +++ mktemp ++ local LAST_ERR=/tmp/tmp.l3S81SZxtq ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.EHAJcRLN5b ++ cat /tmp/tmp.l3S81SZxtq ++ rm /tmp/tmp.EHAJcRLN5b /tmp/tmp.l3S81SZxtq ++ return 0 + local client_container=psmdb-client-bb8b97679-4p4nr + kubectl_bin exec psmdb-client-bb8b97679-4p4nr -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo 
mongodb://myApp:myPass@some-name-mongos.pitr-physical-backup-source-9554.svc.cluster.local:27017/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.bf9DojdwKG ++ mktemp + local LAST_ERR=/tmp/tmp.tpybTfC2eU + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-bb8b97679-4p4nr -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.pitr-physical-backup-source-9554.svc.cluster.local:27017/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.bf9DojdwKG + cat /tmp/tmp.tpybTfC2eU + rm /tmp/tmp.bf9DojdwKG /tmp/tmp.tpybTfC2eU + return 0 + [[ 0 -eq 0 ]] + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2256/e2e-tests/pitr-physical-backup-source/compare/find-3rd.json /tmp/tmp.qeMq7cb3Mw/find-3rd + desc 'disable pitr' + set +o xtrace ----------------------------------------------------------------------------------- disable pitr ----------------------------------------------------------------------------------- + kubectl patch psmdb some-name --type=merge --patch '{"spec": {"backup": {"pitr": {"enabled": false}}}}' perconaservermongodb.psmdb.percona.com/some-name patched + sleep 20 + desc 'delete all backups' + set +o xtrace ----------------------------------------------------------------------------------- delete all backups ----------------------------------------------------------------------------------- + kubectl_bin delete psmdb-backup --all ++ mktemp + local LAST_OUT=/tmp/tmp.fpsblFnzUq ++ mktemp + local LAST_ERR=/tmp/tmp.4t5Knz2OyB + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete psmdb-backup --all + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.fpsblFnzUq perconaservermongodbbackup.psmdb.percona.com "backup-minio-1" deleted from pitr-physical-backup-source-9554 namespace perconaservermongodbbackup.psmdb.percona.com "backup-minio-2" deleted from pitr-physical-backup-source-9554 namespace + cat /tmp/tmp.4t5Knz2OyB + rm /tmp/tmp.fpsblFnzUq /tmp/tmp.4t5Knz2OyB + return 0 + desc 'destroy cluster' + set +o xtrace ----------------------------------------------------------------------------------- destroy cluster ----------------------------------------------------------------------------------- + destroy pitr-physical-backup-source-9554 + local namespace=pitr-physical-backup-source-9554 + local ignore_logs=true + [[ 0 == 1 ]] + desc 'destroy cluster/operator and all other resources' + set +o xtrace ----------------------------------------------------------------------------------- destroy cluster/operator and all other resources ----------------------------------------------------------------------------------- + '[' true == false ']' + delete_backups + desc 'Delete psmdb-backup' + set +o xtrace ----------------------------------------------------------------------------------- Delete psmdb-backup ----------------------------------------------------------------------------------- ++ kubectl_bin get psmdb-backup --no-headers ++ wc -l +++ mktemp ++ local LAST_OUT=/tmp/tmp.V6xG34fjhM +++ mktemp ++ local LAST_ERR=/tmp/tmp.ZkhcIP434m ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb-backup --no-headers ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.V6xG34fjhM ++ cat /tmp/tmp.ZkhcIP434m No resources found in pitr-physical-backup-source-9554 namespace. 
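
Note on the CRD cleanup below: delete_crd removes the CRDs with --wait=false and then strips metadata.finalizers from any leftover custom resources so CRD deletion cannot hang. The stray `kubectl patch ... -n sh` failures in this log are an xargs artifact: when `kubectl get` returns nothing, GNU xargs still runs `sh -xc` once with no arguments, $0 inside the script defaults to "sh", and the template's `-n $0` expands to `-n sh`. A sketch of the same step with that artifact avoided — --no-run-if-empty (-r) is a GNU xargs flag, and the CRD name is the one used in this log:

    # Strip finalizers from every remaining backup object; skip the patch
    # entirely when no resources exist instead of invoking it with $0 == "sh".
    kubectl get perconaservermongodbbackups.psmdb.percona.com --all-namespaces -o wide \
        | grep -v NAMESPACE \
        | xargs --no-run-if-empty -L 1 sh -xc \
        'kubectl patch perconaservermongodbbackups.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"'
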
++ rm /tmp/tmp.V6xG34fjhM /tmp/tmp.ZkhcIP434m ++ return 0 + '[' 0 '!=' 0 ']' + delete_crd + desc 'get and delete old CRDs and RBAC' + set +o xtrace ----------------------------------------------------------------------------------- get and delete old CRDs and RBAC ----------------------------------------------------------------------------------- + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2256/deploy/crd.yaml --ignore-not-found --wait=false ++ mktemp + local LAST_OUT=/tmp/tmp.ueMTBQycJK ++ mktemp + local LAST_ERR=/tmp/tmp.OYaA1h7sXD + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2256/deploy/crd.yaml --ignore-not-found --wait=false + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.ueMTBQycJK customresourcedefinition.apiextensions.k8s.io "perconaservermongodbbackups.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbrestores.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbs.psmdb.percona.com" deleted + cat /tmp/tmp.OYaA1h7sXD + rm /tmp/tmp.ueMTBQycJK /tmp/tmp.OYaA1h7sXD + return 0 ++ yq eval .metadata.name /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2256/deploy/crd.yaml ++ grep -v '\-\-\-' grep: warning: stray \ before - grep: warning: stray \ before - + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + kubectl get perconaservermongodbbackups.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbbackups.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbbackups" + kubectl patch perconaservermongodbbackups.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbbackups" + : + kubectl_bin wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.t4Fj0lWHbF ++ mktemp + local LAST_ERR=/tmp/tmp.f26F4Dv1rX + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.t4Fj0lWHbF + cat /tmp/tmp.f26F4Dv1rX + rm /tmp/tmp.t4Fj0lWHbF /tmp/tmp.f26F4Dv1rX + return 0 + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + kubectl get perconaservermongodbrestores.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbrestores.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbrestores" + kubectl patch perconaservermongodbrestores.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbrestores" + : + kubectl_bin wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.fCSc9ThsPI ++ mktemp + local LAST_ERR=/tmp/tmp.FE0u0m9kYj + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + 
break
+ cat /tmp/tmp.fCSc9ThsPI
+ cat /tmp/tmp.FE0u0m9kYj
+ rm /tmp/tmp.fCSc9ThsPI /tmp/tmp.FE0u0m9kYj
+ return 0
+ for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-')
+ kubectl get perconaservermongodbs.psmdb.percona.com --all-namespaces -o wide
+ grep -v NAMESPACE
+ xargs -L 1 sh -xc 'kubectl patch perconaservermongodbs.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"'
No resources found
+ kubectl patch perconaservermongodbs.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}'
error: resource(s) were provided, but no name was specified
+ :
+ kubectl_bin wait --for=delete crd perconaservermongodbs.psmdb.percona.com
++ mktemp
+ local LAST_OUT=/tmp/tmp.L1X0gl1AYc
++ mktemp
+ local LAST_ERR=/tmp/tmp.2xuyvI9V5v
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ kubectl wait --for=delete crd perconaservermongodbs.psmdb.percona.com
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.L1X0gl1AYc
customresourcedefinition.apiextensions.k8s.io/perconaservermongodbs.psmdb.percona.com condition met
+ cat /tmp/tmp.2xuyvI9V5v
+ rm /tmp/tmp.L1X0gl1AYc /tmp/tmp.2xuyvI9V5v
+ return 0
+ local rbac_yaml=rbac.yaml
+ '[' -n psmdb-operator ']'
+ rbac_yaml=cw-rbac.yaml
+ kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2256/deploy/cw-rbac.yaml --ignore-not-found
++ mktemp
+ local LAST_OUT=/tmp/tmp.SdXlvHtCIk
++ mktemp
+ local LAST_ERR=/tmp/tmp.4kgowjbYKg
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2256/deploy/cw-rbac.yaml --ignore-not-found
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.SdXlvHtCIk
clusterrole.rbac.authorization.k8s.io "percona-server-mongodb-operator" deleted
clusterrolebinding.rbac.authorization.k8s.io "service-account-percona-server-mongodb-operator" deleted
+ cat /tmp/tmp.4kgowjbYKg
+ rm /tmp/tmp.SdXlvHtCIk /tmp/tmp.4kgowjbYKg
+ return 0
+ destroy_cert_manager
+ kubectl_bin delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml
++ mktemp
+ local LAST_OUT=/tmp/tmp.uRNa3O6N4T
++ mktemp
+ local LAST_ERR=/tmp/tmp.Xr8P7xeg4H
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ kubectl delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml
+ exit_status=1
+ set -e
+ '[' 1 '!=' 0 -a -n 1 ']'
+ cat /tmp/tmp.uRNa3O6N4T
+ cat /tmp/tmp.Xr8P7xeg4H
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": namespaces "cert-manager" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "challenges.acme.cert-manager.io" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "orders.acme.cert-manager.io" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificaterequests.cert-manager.io" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificates.cert-manager.io" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "clusterissuers.cert-manager.io" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "issuers.cert-manager.io" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": serviceaccounts "cert-manager-cainjector" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": serviceaccounts "cert-manager" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": serviceaccounts "cert-manager-webhook" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cainjector" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-orders" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cluster-view" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-view" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-edit" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-cainjector" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-orders" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager:leaderelection" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager:leaderelection" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": services "cert-manager-cainjector" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": services "cert-manager" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": services "cert-manager-webhook" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": deployments.apps "cert-manager-cainjector" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": deployments.apps "cert-manager" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": deployments.apps "cert-manager-webhook" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": mutatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": validatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found
+ sleep 0
+ for i in $(seq 0 2)
+ set +e
+ kubectl delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml
+ exit_status=1
+ set -e
+ '[' 1 '!=' 0 -a -n 1 ']'
+ cat /tmp/tmp.uRNa3O6N4T
+ cat /tmp/tmp.Xr8P7xeg4H
+ sleep 4
+ for i in $(seq 0 2)
+ set +e
+ kubectl delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml
+ exit_status=1
+ set -e
+ '[' 1 '!=' 0 -a -n 1 ']'
+ cat /tmp/tmp.uRNa3O6N4T
+ cat /tmp/tmp.Xr8P7xeg4H
+ sleep 8
+ cat /tmp/tmp.uRNa3O6N4T
+ cat /tmp/tmp.Xr8P7xeg4H
"cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cluster-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-edit" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting 
"https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": services "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": services "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": services "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": deployments.apps "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": deployments.apps "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": deployments.apps 
"cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": mutatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": validatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found + rm /tmp/tmp.uRNa3O6N4T /tmp/tmp.Xr8P7xeg4H + return 1 + true + '[' -n '' ']' + '[' -n psmdb-operator ']' + kubectl_bin delete --grace-period=0 --force=true namespace pitr-physical-backup-source-9554 + rm -rf /tmp/tmp.qeMq7cb3Mw + kubectl_bin delete --grace-period=0 --force=true namespace psmdb-operator ++ mktemp ++ mktemp + desc 'test passed' + set +o xtrace ----------------------------------------------------------------------------------- test passed ----------------------------------------------------------------------------------- + local LAST_OUT=/tmp/tmp.dVIvou5sZE + local LAST_OUT=/tmp/tmp.CPggGTuO9q ++ mktemp ++ mktemp + local LAST_ERR=/tmp/tmp.yToqniT0or + local exit_status=0 + local timeout=4 + local LAST_ERR=/tmp/tmp.OlcCafG10k + local exit_status=0 + local timeout=4 ++ seq 0 2 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete --grace-period=0 --force=true namespace pitr-physical-backup-source-9554 + for i in $(seq 0 2) + set +e + kubectl delete --grace-period=0 --force=true namespace psmdb-operator