Log: /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/logs/pitr-physical.log grep: warning: stray \ before - Warning: version difference between client (1.34) and server (1.31) exceeds the supported minor version skew of +/-1 Warning: version difference between client (1.34) and server (1.31) exceeds the supported minor version skew of +/-1 Warning: version difference between client (1.34) and server (1.31) exceeds the supported minor version skew of +/-1 + main + create_infra pitr-physical-23919 + local ns=pitr-physical-23919 + [[ 1 == 1 ]] + delete_crd + desc 'get and delete old CRDs and RBAC' + set +o xtrace ----------------------------------------------------------------------------------- get and delete old CRDs and RBAC ----------------------------------------------------------------------------------- + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/deploy/crd.yaml --ignore-not-found --wait=false ++ mktemp + local LAST_OUT=/tmp/tmp.wjTofA5Qvs ++ mktemp + local LAST_ERR=/tmp/tmp.kDUaKu9p09 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/deploy/crd.yaml --ignore-not-found --wait=false + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.wjTofA5Qvs customresourcedefinition.apiextensions.k8s.io "perconaservermongodbbackups.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbrestores.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbs.psmdb.percona.com" deleted + cat /tmp/tmp.kDUaKu9p09 + rm /tmp/tmp.wjTofA5Qvs /tmp/tmp.kDUaKu9p09 + return 0 ++ yq eval .metadata.name /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/deploy/crd.yaml ++ grep -v '\-\-\-' grep: warning: stray \ before - grep: warning: stray \ before - + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + grep -v NAMESPACE + kubectl get perconaservermongodbbackups.psmdb.percona.com --all-namespaces -o wide + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbbackups.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbbackups" + kubectl patch perconaservermongodbbackups.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbbackups" + : + kubectl_bin wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.p2hYZL9AXk ++ mktemp + local LAST_ERR=/tmp/tmp.UFIzyMRrJO + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.p2hYZL9AXk + cat /tmp/tmp.UFIzyMRrJO + rm /tmp/tmp.p2hYZL9AXk /tmp/tmp.UFIzyMRrJO + return 0 + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + kubectl get perconaservermongodbrestores.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbrestores.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbrestores" + kubectl patch perconaservermongodbrestores.psmdb.percona.com -n sh --type=merge -p 
'{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbrestores" + : + kubectl_bin wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.dLK2Q5BfcN ++ mktemp + local LAST_ERR=/tmp/tmp.BOVwFhAJ5A + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.dLK2Q5BfcN + cat /tmp/tmp.BOVwFhAJ5A + rm /tmp/tmp.dLK2Q5BfcN /tmp/tmp.BOVwFhAJ5A + return 0 + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + kubectl get perconaservermongodbs.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbs.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbs" + kubectl patch perconaservermongodbs.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbs" + : + kubectl_bin wait --for=delete crd perconaservermongodbs.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.rS9L6vz6mB ++ mktemp + local LAST_ERR=/tmp/tmp.oUSRgrVtPn + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbs.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.rS9L6vz6mB + cat /tmp/tmp.oUSRgrVtPn + rm /tmp/tmp.rS9L6vz6mB /tmp/tmp.oUSRgrVtPn + return 0 + local rbac_yaml=rbac.yaml + '[' -n psmdb-operator ']' + rbac_yaml=cw-rbac.yaml + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/deploy/cw-rbac.yaml --ignore-not-found ++ mktemp + local LAST_OUT=/tmp/tmp.R9wqSqiSle ++ mktemp + local LAST_ERR=/tmp/tmp.17xTaBPCbf + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/deploy/cw-rbac.yaml --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.R9wqSqiSle + cat /tmp/tmp.17xTaBPCbf + rm /tmp/tmp.R9wqSqiSle /tmp/tmp.17xTaBPCbf + return 0 + check_crd_for_deletion PR-1961-5b0cc669 + local git_tag=PR-1961-5b0cc669 ++ curl -s https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/PR-1961-5b0cc669/deploy/crd.yaml ++ yq eval .metadata.name ++ /usr/sbin/sed ':a;N;$!ba;s/\n/ /g' ++ /usr/sbin/sed s/---//g + for crd_name in $(curl -s https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/${git_tag}/deploy/crd.yaml | yq eval '.metadata.name' | $sed 's/---//g' | $sed ':a;N;$!ba;s/\n/ /g') ++ kubectl_bin get crd/null -o 'jsonpath={.status.conditions[-1].type}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.zVhKoq7FZz +++ mktemp ++ local LAST_ERR=/tmp/tmp.gdrLyjQ72v ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}' ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.zVhKoq7FZz ++ cat /tmp/tmp.gdrLyjQ72v Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ sleep 0 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}' ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat 
/tmp/tmp.zVhKoq7FZz ++ cat /tmp/tmp.gdrLyjQ72v Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ sleep 4 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}' ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.zVhKoq7FZz ++ cat /tmp/tmp.gdrLyjQ72v Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ sleep 8 ++ cat /tmp/tmp.zVhKoq7FZz ++ cat /tmp/tmp.gdrLyjQ72v Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ rm /tmp/tmp.zVhKoq7FZz /tmp/tmp.gdrLyjQ72v ++ return 1 + [[ '' == \T\e\r\m\i\n\a\t\i\n\g ]] + '[' -n psmdb-operator ']' + create_namespace psmdb-operator + local namespace=psmdb-operator + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + destroy_chaos_mesh ++ helm list --all-namespaces --filter chaos-mesh ++ tail -n1 ++ awk '-F ' '{print $2}' ++ sed s/NAMESPACE// + local chaos_mesh_ns= + desc 'destroy chaos-mesh' + set +o xtrace ----------------------------------------------------------------------------------- destroy chaos-mesh ----------------------------------------------------------------------------------- + '[' -n '' ']' ++ kubectl get MutatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete MutatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep validate-auth ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl api-resources ++ grep chaos-mesh ++ awk '{print $1}' ++ kubectl get crd ++ grep chaos-mesh.org ++ awk '{print $1}' + timeout 30 kubectl delete crd error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrolebinding ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrolebinding error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrole ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrole error: resource(s) were provided, but no name was specified + : + desc 'cleaned up all old namespaces' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up all old namespaces ----------------------------------------------------------------------------------- + kubectl_bin get ns + egrep -v '^kube-|^default|Terminating|psmdb-operator|openshift|^gke-|^gmp-|^NAME' ++ mktemp + '[' -n '' ']' + desc 'cleaned up old namespaces psmdb-operator' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up old namespaces psmdb-operator ----------------------------------------------------------------------------------- + kubectl_bin delete namespace psmdb-operator --ignore-not-found + awk '{print$1}' ++ mktemp + xargs kubectl delete ns + local LAST_OUT=/tmp/tmp.IokxlGeyys egrep: warning: egrep is obsolescent; using grep -E ++ mktemp + local LAST_OUT=/tmp/tmp.zBXcTpTI2J ++ mktemp + local LAST_ERR=/tmp/tmp.AGoOGWjZCJ + local exit_status=0 + local timeout=4 ++ seq 0 2 + local LAST_ERR=/tmp/tmp.2eT57OTHMs + local exit_status=0 + local timeout=4 ++ 
seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get ns + for i in $(seq 0 2) + set +e + kubectl delete namespace psmdb-operator --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.IokxlGeyys + cat /tmp/tmp.AGoOGWjZCJ + rm /tmp/tmp.IokxlGeyys /tmp/tmp.AGoOGWjZCJ + return 0 error: resource(s) were provided, but no name was specified + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.zBXcTpTI2J namespace "psmdb-operator" deleted + cat /tmp/tmp.2eT57OTHMs + rm /tmp/tmp.zBXcTpTI2J /tmp/tmp.2eT57OTHMs + return 0 + kubectl_bin wait --for=delete namespace psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.j9W7F0fCaA ++ mktemp + local LAST_ERR=/tmp/tmp.Fyi5xRrpWG + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete namespace psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.j9W7F0fCaA + cat /tmp/tmp.Fyi5xRrpWG + rm /tmp/tmp.j9W7F0fCaA /tmp/tmp.Fyi5xRrpWG + return 0 + desc 'create namespace psmdb-operator' + set +o xtrace ----------------------------------------------------------------------------------- create namespace psmdb-operator ----------------------------------------------------------------------------------- + kubectl_bin create namespace psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.nQYLiPDN0w ++ mktemp + local LAST_ERR=/tmp/tmp.c3Udj492th + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl create namespace psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.nQYLiPDN0w namespace/psmdb-operator created + cat /tmp/tmp.c3Udj492th + rm /tmp/tmp.nQYLiPDN0w /tmp/tmp.c3Udj492th + return 0 + set_kube_ctx psmdb-operator + local namespace=psmdb-operator ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.IgHdqFUMXt +++ mktemp ++ local LAST_ERR=/tmp/tmp.VTDzwyQmLJ ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.IgHdqFUMXt ++ cat /tmp/tmp.VTDzwyQmLJ ++ rm /tmp/tmp.IgHdqFUMXt /tmp/tmp.VTDzwyQmLJ ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-1961-5b0cc669-2-cluster10 --namespace=psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.duc1xPNUtm ++ mktemp + local LAST_ERR=/tmp/tmp.UbiQsYFTqL + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-1961-5b0cc669-2-cluster10 --namespace=psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.duc1xPNUtm Context "gke_cloud-dev-112233_us-central1-a_jen-psmdb-1961-5b0cc669-2-cluster10" modified. 
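A condensed sketch of the create_infra cleanup traced above, using plain kubectl in place of the kubectl_bin retry wrapper; the repository path, CRD names and namespace are taken from this log, everything else is illustrative:

    # drop old CRDs/RBAC, clear finalizers on leftover custom resources,
    # then recreate the operator namespace
    src_dir=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961

    kubectl delete -f "${src_dir}/deploy/crd.yaml" --ignore-not-found --wait=false

    for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '^---$'); do
        # strip finalizers so half-deleted objects cannot block CRD removal
        kubectl get "${crd_name}" --all-namespaces -o wide | grep -v NAMESPACE \
            | xargs -L 1 sh -xc "kubectl patch ${crd_name} -n \$0 \$1 --type=merge -p '{\"metadata\":{\"finalizers\":[]}}'" || true
        kubectl wait --for=delete "crd/${crd_name}" || true
    done

    kubectl delete -f "${src_dir}/deploy/cw-rbac.yaml" --ignore-not-found
    kubectl delete namespace psmdb-operator --ignore-not-found
    kubectl wait --for=delete namespace psmdb-operator
    kubectl create namespace psmdb-operator
    kubectl config set-context "$(kubectl config current-context)" --namespace=psmdb-operator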
+ cat /tmp/tmp.UbiQsYFTqL + rm /tmp/tmp.duc1xPNUtm /tmp/tmp.UbiQsYFTqL + return 0 + deploy_operator + desc 'start PSMDB operator: perconalab/percona-server-mongodb-operator:PR-1961-5b0cc669' + set +o xtrace ----------------------------------------------------------------------------------- start PSMDB operator: perconalab/percona-server-mongodb-operator:PR-1961-5b0cc669 ----------------------------------------------------------------------------------- + local cr_file + '[' -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/pitr-physical/conf/crd.yaml ']' + cr_file=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/deploy/crd.yaml + kubectl_bin apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/deploy/crd.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.ie1Bvp4I4Q ++ mktemp + local LAST_ERR=/tmp/tmp.Syy6QIc4SQ + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/deploy/crd.yaml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.ie1Bvp4I4Q customresourcedefinition.apiextensions.k8s.io/perconaservermongodbbackups.psmdb.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaservermongodbrestores.psmdb.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaservermongodbs.psmdb.percona.com serverside-applied + cat /tmp/tmp.Syy6QIc4SQ + rm /tmp/tmp.ie1Bvp4I4Q /tmp/tmp.Syy6QIc4SQ + return 0 + '[' -n psmdb-operator ']' + apply_rbac cw-rbac + local operator_namespace=psmdb-operator + local rbac=cw-rbac + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/deploy/cw-rbac.yaml + sed -e 's^namespace: .*^namespace: psmdb-operator^' + kubectl_bin apply -n psmdb-operator -f - ++ mktemp + local LAST_OUT=/tmp/tmp.l8t4ZtQgwJ ++ mktemp + local LAST_ERR=/tmp/tmp.ofIlzpL8H7 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -n psmdb-operator -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.l8t4ZtQgwJ clusterrole.rbac.authorization.k8s.io/percona-server-mongodb-operator created serviceaccount/percona-server-mongodb-operator created clusterrolebinding.rbac.authorization.k8s.io/service-account-percona-server-mongodb-operator created + cat /tmp/tmp.ofIlzpL8H7 + rm /tmp/tmp.l8t4ZtQgwJ /tmp/tmp.ofIlzpL8H7 + return 0 + yq eval ' (.spec.template.spec.containers[].image = "perconalab/percona-server-mongodb-operator:PR-1961-5b0cc669") | ((.. | select(.[] == "DISABLE_TELEMETRY")) |= .value="true") | ((.. 
| select(.[] == "LOG_LEVEL")) |= .value="DEBUG")' /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/deploy/cw-operator.yaml + kubectl_bin apply -n psmdb-operator -f - ++ mktemp + local LAST_OUT=/tmp/tmp.rtshNOcDIA ++ mktemp + local LAST_ERR=/tmp/tmp.3yYrOOoyrY + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -n psmdb-operator -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.rtshNOcDIA deployment.apps/percona-server-mongodb-operator created + cat /tmp/tmp.3yYrOOoyrY + rm /tmp/tmp.rtshNOcDIA /tmp/tmp.3yYrOOoyrY + return 0 + sleep 20 ++ get_operator_pod ++ kubectl_bin get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.XQglN9QAV5 +++ mktemp ++ local LAST_ERR=/tmp/tmp.6ViGigkUA3 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.XQglN9QAV5 ++ cat /tmp/tmp.6ViGigkUA3 ++ rm /tmp/tmp.XQglN9QAV5 /tmp/tmp.6ViGigkUA3 ++ return 0 + wait_operator_pod percona-server-mongodb-operator-5464c7ccdb-nm6zk + local pod=percona-server-mongodb-operator-5464c7ccdb-nm6zk + set +o xtrace waiting for pod/percona-server-mongodb-operator-5464c7ccdb-nm6zk to be ready.OK + echo 'Print operator info from log' Print operator info from log + grep 'Manager starting up' ++ get_operator_pod ++ kubectl_bin get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.u28JMQRTS9 +++ mktemp ++ local LAST_ERR=/tmp/tmp.rqmIqNefF0 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.u28JMQRTS9 ++ cat /tmp/tmp.rqmIqNefF0 ++ rm /tmp/tmp.u28JMQRTS9 /tmp/tmp.rqmIqNefF0 ++ return 0 + kubectl_bin logs -n psmdb-operator percona-server-mongodb-operator-5464c7ccdb-nm6zk ++ mktemp + local LAST_OUT=/tmp/tmp.PkVJAXW0g3 ++ mktemp + local LAST_ERR=/tmp/tmp.sss0zvW5nk + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl logs -n psmdb-operator percona-server-mongodb-operator-5464c7ccdb-nm6zk + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.PkVJAXW0g3 + cat /tmp/tmp.sss0zvW5nk + rm /tmp/tmp.PkVJAXW0g3 /tmp/tmp.sss0zvW5nk + return 0 2025-12-04T20:28:01.134Z INFO setup Manager starting up {"gitCommit": "5b0cc6694bb3fee55fa4cc155ac44e1b0423e748", "gitBranch": "PR-1961-5b0cc669", "buildTime": "", "goVersion": "go1.25.5", "os": "linux", "arch": "amd64"} + create_namespace pitr-physical-23919 + local namespace=pitr-physical-23919 + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + destroy_chaos_mesh ++ helm list --all-namespaces --filter chaos-mesh ++ tail -n1 ++ awk '-F ' '{print $2}' ++ sed s/NAMESPACE// + local chaos_mesh_ns= + desc 'destroy chaos-mesh' + set +o xtrace ----------------------------------------------------------------------------------- destroy chaos-mesh ----------------------------------------------------------------------------------- + '[' -n '' ']' ++ kubectl get MutatingWebhookConfiguration ++ grep chaos-mesh ++ awk 
'{print $1}' + timeout 30 kubectl delete MutatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep validate-auth ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl api-resources ++ grep chaos-mesh ++ awk '{print $1}' ++ kubectl get crd ++ grep chaos-mesh.org ++ awk '{print $1}' + timeout 30 kubectl delete crd error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrolebinding ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrolebinding error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrole ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrole error: resource(s) were provided, but no name was specified + : + desc 'cleaned up all old namespaces' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up all old namespaces ----------------------------------------------------------------------------------- + kubectl_bin get ns + egrep -v '^kube-|^default|Terminating|psmdb-operator|openshift|^gke-|^gmp-|^NAME' + awk '{print$1}' ++ mktemp + '[' -n '' ']' + desc 'cleaned up old namespaces pitr-physical-23919' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up old namespaces pitr-physical-23919 ----------------------------------------------------------------------------------- + kubectl_bin delete namespace pitr-physical-23919 --ignore-not-found + xargs kubectl delete ns + local LAST_OUT=/tmp/tmp.4uj35DfFDV ++ mktemp ++ mktemp egrep: warning: egrep is obsolescent; using grep -E + local LAST_OUT=/tmp/tmp.niyKfamJZO + local LAST_ERR=/tmp/tmp.trBEkGAags + local exit_status=0 + local timeout=4 ++ seq 0 2 ++ mktemp + for i in $(seq 0 2) + set +e + kubectl get ns + local LAST_ERR=/tmp/tmp.inOi0nGCxX + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete namespace pitr-physical-23919 --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.4uj35DfFDV + cat /tmp/tmp.trBEkGAags + rm /tmp/tmp.4uj35DfFDV /tmp/tmp.trBEkGAags + return 0 error: resource(s) were provided, but no name was specified + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.niyKfamJZO + cat /tmp/tmp.inOi0nGCxX + rm /tmp/tmp.niyKfamJZO /tmp/tmp.inOi0nGCxX + return 0 + kubectl_bin wait --for=delete namespace pitr-physical-23919 ++ mktemp + local LAST_OUT=/tmp/tmp.TAovW5AKPE ++ mktemp + local LAST_ERR=/tmp/tmp.mxPSANiPhC + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete namespace pitr-physical-23919 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.TAovW5AKPE + cat /tmp/tmp.mxPSANiPhC + rm /tmp/tmp.TAovW5AKPE /tmp/tmp.mxPSANiPhC + return 0 + desc 'create namespace pitr-physical-23919' + set +o xtrace ----------------------------------------------------------------------------------- create namespace pitr-physical-23919 ----------------------------------------------------------------------------------- + kubectl_bin create namespace 
pitr-physical-23919 ++ mktemp + local LAST_OUT=/tmp/tmp.UYpRGdGWcm ++ mktemp + local LAST_ERR=/tmp/tmp.ltJVYDOmWm + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl create namespace pitr-physical-23919 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.UYpRGdGWcm namespace/pitr-physical-23919 created + cat /tmp/tmp.ltJVYDOmWm + rm /tmp/tmp.UYpRGdGWcm /tmp/tmp.ltJVYDOmWm + return 0 + set_kube_ctx pitr-physical-23919 + local namespace=pitr-physical-23919 ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.FMl5vrBgcn +++ mktemp ++ local LAST_ERR=/tmp/tmp.s8Lpfie2Sa ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.FMl5vrBgcn ++ cat /tmp/tmp.s8Lpfie2Sa ++ rm /tmp/tmp.FMl5vrBgcn /tmp/tmp.s8Lpfie2Sa ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-1961-5b0cc669-2-cluster10 --namespace=pitr-physical-23919 ++ mktemp + local LAST_OUT=/tmp/tmp.vxduAZhwRQ ++ mktemp + local LAST_ERR=/tmp/tmp.ny3RwD6MlR + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-1961-5b0cc669-2-cluster10 --namespace=pitr-physical-23919 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.vxduAZhwRQ Context "gke_cloud-dev-112233_us-central1-a_jen-psmdb-1961-5b0cc669-2-cluster10" modified. + cat /tmp/tmp.ny3RwD6MlR + rm /tmp/tmp.vxduAZhwRQ /tmp/tmp.ny3RwD6MlR + return 0 + deploy_minio + desc 'install Minio' + set +o xtrace ----------------------------------------------------------------------------------- install Minio ----------------------------------------------------------------------------------- + helm uninstall minio-service Error: uninstall: Release not loaded: minio-service: release: not found + : + helm repo remove minio "minio" has been removed from your repositories + helm repo add minio https://charts.min.io/ "minio" has been added to your repositories + retry 10 60 helm install minio-service --version 5.4.0 --set replicas=1 --set mode=standalone --set resources.requests.memory=256Mi --set rootUser=rootuser --set rootPassword=rootpass123 --set 'users[0].accessKey=some-access-key' --set 'users[0].secretKey=some-secret-key' --set 'users[0].policy=consoleAdmin' --set service.type=ClusterIP --set configPathmc=/tmp/.minio/ --set persistence.size=2G --set securityContext.enabled=false minio/minio + local max=10 + local delay=60 + shift 2 + local n=1 + helm install minio-service --version 5.4.0 --set replicas=1 --set mode=standalone --set resources.requests.memory=256Mi --set rootUser=rootuser --set rootPassword=rootpass123 --set 'users[0].accessKey=some-access-key' --set 'users[0].secretKey=some-secret-key' --set 'users[0].policy=consoleAdmin' --set service.type=ClusterIP --set configPathmc=/tmp/.minio/ --set persistence.size=2G --set securityContext.enabled=false minio/minio NAME: minio-service LAST DEPLOYED: Thu Dec 4 20:28:45 2025 NAMESPACE: pitr-physical-23919 STATUS: deployed REVISION: 1 TEST SUITE: None NOTES: MinIO can be accessed via port 9000 on the following DNS name from within your cluster: minio-service.pitr-physical-23919.cluster.local To access MinIO from localhost, run the below commands: 1. 
export POD_NAME=$(kubectl get pods --namespace pitr-physical-23919 -l "release=minio-service" -o jsonpath="{.items[0].metadata.name}") 2. kubectl port-forward $POD_NAME 9000 --namespace pitr-physical-23919 Read more about port forwarding here: http://kubernetes.io/docs/user-guide/kubectl/kubectl_port-forward/ You can now access MinIO server on http://localhost:9000. Follow the below steps to connect to MinIO server with mc client: 1. Download the MinIO mc client - https://min.io/docs/minio/linux/reference/minio-mc.html#quickstart 2. export MC_HOST_minio-service-local=http://$(kubectl get secret --namespace pitr-physical-23919 minio-service -o jsonpath="{.data.rootUser}" | base64 --decode):$(kubectl get secret --namespace pitr-physical-23919 minio-service -o jsonpath="{.data.rootPassword}" | base64 --decode)@localhost:9000 3. mc ls minio-service-local ++ kubectl_bin get pods --selector=release=minio-service -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.hEBsCHaiX3 +++ mktemp ++ local LAST_ERR=/tmp/tmp.uipmcZ1qeH ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=release=minio-service -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.hEBsCHaiX3 ++ cat /tmp/tmp.uipmcZ1qeH ++ rm /tmp/tmp.hEBsCHaiX3 /tmp/tmp.uipmcZ1qeH ++ return 0 + MINIO_POD=minio-service-d9589b474-9wxt8 + wait_pod minio-service-d9589b474-9wxt8 + local pod=minio-service-d9589b474-9wxt8 + set +o xtrace waiting for pod/minio-service-d9589b474-9wxt8 to be ready.OK + '[' -n psmdb-operator ']' + kubectl_bin create svc -n psmdb-operator externalname minio-service --external-name=minio-service.pitr-physical-23919.svc.cluster.local --tcp=9000 ++ mktemp + local LAST_OUT=/tmp/tmp.TJSiLHEn76 ++ mktemp + local LAST_ERR=/tmp/tmp.6Nd7qtbZVw + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl create svc -n psmdb-operator externalname minio-service --external-name=minio-service.pitr-physical-23919.svc.cluster.local --tcp=9000 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.TJSiLHEn76 service/minio-service created + cat /tmp/tmp.6Nd7qtbZVw + rm /tmp/tmp.TJSiLHEn76 /tmp/tmp.6Nd7qtbZVw + return 0 + create_minio_bucket operator-testing + local bucket=operator-testing + kubectl_bin run -i --rm aws-cli --image=perconalab/awscli --restart=Never -- bash -c 'AWS_ACCESS_KEY_ID=some-access-key AWS_SECRET_ACCESS_KEY=some-secret-key AWS_DEFAULT_REGION=us-east-1 /usr/bin/aws --endpoint-url http://minio-service:9000 s3 mb s3://operator-testing' ++ mktemp + local LAST_OUT=/tmp/tmp.oq0jA6dIqN ++ mktemp + local LAST_ERR=/tmp/tmp.brQyT5Sjvm + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl run -i --rm aws-cli --image=perconalab/awscli --restart=Never -- bash -c 'AWS_ACCESS_KEY_ID=some-access-key AWS_SECRET_ACCESS_KEY=some-secret-key AWS_DEFAULT_REGION=us-east-1 /usr/bin/aws --endpoint-url http://minio-service:9000 s3 mb s3://operator-testing' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.oq0jA6dIqN make_bucket: operator-testing pod "aws-cli" deleted from pitr-physical-23919 namespace + cat /tmp/tmp.brQyT5Sjvm All commands and output from this session will be recorded in container logs, including credentials and sensitive information passed through the command prompt. If you don't see a command prompt, try pressing enter. 
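The MinIO deployment just traced reduces to the minimal sketch below; the chart version, credentials, service name and namespaces are copied from this log, while the retry and uninstall helpers are omitted:

    # install a standalone MinIO for backup storage and make it resolvable
    # from the operator namespace under the same service name
    helm repo add minio https://charts.min.io/
    helm install minio-service minio/minio --version 5.4.0 \
        --set replicas=1 --set mode=standalone \
        --set rootUser=rootuser --set rootPassword=rootpass123 \
        --set 'users[0].accessKey=some-access-key' \
        --set 'users[0].secretKey=some-secret-key' \
        --set 'users[0].policy=consoleAdmin' \
        --set service.type=ClusterIP --set persistence.size=2G

    kubectl -n psmdb-operator create service externalname minio-service \
        --external-name=minio-service.pitr-physical-23919.svc.cluster.local --tcp=9000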
warning: couldn't attach to pod/aws-cli, falling back to streaming logs: Internal error occurred: unable to upgrade connection: container aws-cli not found in pod aws-cli_pitr-physical-23919 + rm /tmp/tmp.oq0jA6dIqN /tmp/tmp.brQyT5Sjvm + return 0 + desc 'create secrets and start client' + set +o xtrace ----------------------------------------------------------------------------------- create secrets and start client ----------------------------------------------------------------------------------- + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/conf/secrets.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/conf/client.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/conf/minio-secret.yml ++ mktemp + local LAST_OUT=/tmp/tmp.UBQy8ucJUM ++ mktemp + local LAST_ERR=/tmp/tmp.pVU2amjdXI + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/conf/secrets.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/conf/client.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/conf/minio-secret.yml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.UBQy8ucJUM secret/some-users created deployment.apps/psmdb-client created secret/minio-secret created + cat /tmp/tmp.pVU2amjdXI + rm /tmp/tmp.UBQy8ucJUM /tmp/tmp.pVU2amjdXI + return 0 + cluster=some-name + desc 'create first PSMDB cluster some-name' + set +o xtrace ----------------------------------------------------------------------------------- create first PSMDB cluster some-name ----------------------------------------------------------------------------------- + apply_cluster /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/pitr-physical/conf/some-name-rs0.yml + '[' -z '' ']' + cat_config /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/pitr-physical/conf/some-name-rs0.yml + kubectl_bin apply -f - + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/pitr-physical/conf/some-name-rs0.yml ++ mktemp + yq eval '(.spec | select(.image == null)).image = "perconalab/percona-server-mongodb-operator:main-mongod8.0"' + yq eval '(.spec | select(has("pmm"))).pmm.image = "percona/pmm-client:2.44.1-1"' + yq eval '(.spec | select(has("initImage"))).initImage = "perconalab/percona-server-mongodb-operator:PR-1961-5b0cc669"' + yq eval '(.spec | select(has("backup"))).backup.image = "percona/percona-backup-mongodb:2.11.0"' + yq eval '.spec.upgradeOptions.apply="Never"' + local LAST_OUT=/tmp/tmp.8N3EnEDFJ1 ++ mktemp + local LAST_ERR=/tmp/tmp.hgHcoTeb5s + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.8N3EnEDFJ1 perconaservermongodb.psmdb.percona.com/some-name created + cat /tmp/tmp.hgHcoTeb5s + rm /tmp/tmp.8N3EnEDFJ1 /tmp/tmp.hgHcoTeb5s + return 0 + desc 'check if all 3 Pods started' + set +o xtrace ----------------------------------------------------------------------------------- check if all 3 Pods started ----------------------------------------------------------------------------------- + wait_for_running some-name-rs0 3 + local name=some-name-rs0 + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=some-name ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod 
some-name-rs0-0 + local pod=some-name-rs0-0 + set +o xtrace waiting for pod/some-name-rs0-0 to be ready.....OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod some-name-rs0-1 + local pod=some-name-rs0-1 + set +o xtrace waiting for pod/some-name-rs0-1 to be ready.....OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.XrqsPH3p7w +++ mktemp ++ local LAST_ERR=/tmp/tmp.zTW55DWkjO ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.XrqsPH3p7w ++ cat /tmp/tmp.zTW55DWkjO ++ rm /tmp/tmp.XrqsPH3p7w /tmp/tmp.zTW55DWkjO ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-rs0-2 + local pod=some-name-rs0-2 + set +o xtrace waiting for pod/some-name-rs0-2 to be ready.......OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.dHDaG4ctHo +++ mktemp ++ local LAST_ERR=/tmp/tmp.xONNJiRAM6 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.dHDaG4ctHo ++ cat /tmp/tmp.xONNJiRAM6 ++ rm /tmp/tmp.dHDaG4ctHo /tmp/tmp.xONNJiRAM6 ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.o6r0lkij9h +++ mktemp ++ local LAST_ERR=/tmp/tmp.580giKkKcq ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.o6r0lkij9h ++ cat /tmp/tmp.580giKkKcq ++ rm /tmp/tmp.o6r0lkij9h /tmp/tmp.580giKkKcq ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness.......................................... 
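Behind the apply_cluster call above, the CR is rendered with yq to pin the images recorded in this run before being applied. A simplified sketch follows; the pmm override and the kubectl_bin wrapper are left out, and the readiness wait shown with kubectl wait is an assumption standing in for the harness's own polling:

    # render the pitr-physical CR with the images from this run and apply it
    src_dir=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961
    cat "${src_dir}/e2e-tests/pitr-physical/conf/some-name-rs0.yml" \
        | yq eval '(.spec | select(.image == null)).image = "perconalab/percona-server-mongodb-operator:main-mongod8.0"' \
        | yq eval '(.spec | select(has("initImage"))).initImage = "perconalab/percona-server-mongodb-operator:PR-1961-5b0cc669"' \
        | yq eval '(.spec | select(has("backup"))).backup.image = "percona/percona-backup-mongodb:2.11.0"' \
        | yq eval '.spec.upgradeOptions.apply = "Never"' \
        | kubectl apply -f -

    # wait for the rs0 pods, then for the cluster object to report ready
    for pod in some-name-rs0-0 some-name-rs0-1 some-name-rs0-2; do
        kubectl wait --for=condition=Ready "pod/${pod}" --timeout=600s
    done
    kubectl wait --for=jsonpath='{.status.state}'=ready psmdb/some-name --timeout=900s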
+ wait_for_running some-name-cfg 3 false + local name=some-name-cfg + let last_pod=2 + local check_cluster_readyness=false + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=cfg + local cluster_name=some-name ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod some-name-cfg-0 + local pod=some-name-cfg-0 + set +o xtrace waiting for pod/some-name-cfg-0 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod some-name-cfg-1 + local pod=some-name-cfg-1 + set +o xtrace waiting for pod/some-name-cfg-1 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.C8G8Hh3SYn +++ mktemp ++ local LAST_ERR=/tmp/tmp.gYcoqmE9XS ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.C8G8Hh3SYn ++ cat /tmp/tmp.gYcoqmE9XS ++ rm /tmp/tmp.C8G8Hh3SYn /tmp/tmp.gYcoqmE9XS ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-cfg-2 + local pod=some-name-cfg-2 + set +o xtrace waiting for pod/some-name-cfg-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.lXuI9sJvcd +++ mktemp ++ local LAST_ERR=/tmp/tmp.uJYCgZTE1e ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.lXuI9sJvcd ++ cat /tmp/tmp.uJYCgZTE1e ++ rm /tmp/tmp.lXuI9sJvcd /tmp/tmp.uJYCgZTE1e ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.wy0Nla6Mkm +++ mktemp ++ local LAST_ERR=/tmp/tmp.4lkipwsNEa ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.wy0Nla6Mkm ++ cat /tmp/tmp.4lkipwsNEa ++ rm /tmp/tmp.wy0Nla6Mkm /tmp/tmp.4lkipwsNEa ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ false == \t\r\u\e ]] + sleep 10 + desc 'write data' + set +o xtrace ----------------------------------------------------------------------------------- write data ----------------------------------------------------------------------------------- + write_initial_data + desc 'create user myApp' + set +o xtrace ----------------------------------------------------------------------------------- create user myApp ----------------------------------------------------------------------------------- + run_mongos 'db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})' userAdmin:userAdmin123456@some-name-mongos.pitr-physical-23919 + local 'command=db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})' + local uri=userAdmin:userAdmin123456@some-name-mongos.pitr-physical-23919 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local port=27017 + local mongo_bin=mongo ++ echo .svc.cluster.local ++ awk -F: '{print $2}' + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 ++ 
kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.PSHXHHE2Nn +++ mktemp ++ local LAST_ERR=/tmp/tmp.1jDm1avG8r ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.PSHXHHE2Nn ++ cat /tmp/tmp.1jDm1avG8r ++ rm /tmp/tmp.PSHXHHE2Nn /tmp/tmp.1jDm1avG8r ++ return 0 + local client_container=psmdb-client-696897d69b-7zv7m + kubectl_bin exec psmdb-client-696897d69b-7zv7m -- bash -c 'printf '\''db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})\n'\'' | mongo mongodb://userAdmin:userAdmin123456@some-name-mongos.pitr-physical-23919.svc.cluster.local:27017/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.FfbHGS6kSC ++ mktemp + local LAST_ERR=/tmp/tmp.2BHWGt7Wve + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-7zv7m -- bash -c 'printf '\''db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})\n'\'' | mongo mongodb://userAdmin:userAdmin123456@some-name-mongos.pitr-physical-23919.svc.cluster.local:27017/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.FfbHGS6kSC Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-mongos.pitr-physical-23919.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb Implicit session: session { "id" : UUID("a05b0aa5-ed99-4a14-a86e-05c443426f7a") } Percona Server for MongoDB server version: v8.0.16-5 WARNING: shell and server versions do not match Successfully added user: { "user" : "myApp", "roles" : [ { "db" : "myApp", "role" : "readWrite" } ] } bye + cat /tmp/tmp.2BHWGt7Wve + rm /tmp/tmp.FfbHGS6kSC /tmp/tmp.2BHWGt7Wve + return 0 + sleep 2 + write_document + local cmp_postfix= + local sleep_value=0 + log 'write initial data, read from all' + set +o xtrace [2025-12-04T20:32:57+0000] write initial data, read from all + run_mongos 'use myApp\n db.test.insert({ x: 100500 })' myApp:myPass@some-name-mongos.pitr-physical-23919 + local 'command=use myApp\n db.test.insert({ x: 100500 })' + local uri=myApp:myPass@some-name-mongos.pitr-physical-23919 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local port=27017 + local mongo_bin=mongo ++ echo .svc.cluster.local ++ awk -F: '{print $2}' + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.KgXh68uieN +++ mktemp ++ local LAST_ERR=/tmp/tmp.e1WyMjq8Ox ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.KgXh68uieN ++ cat /tmp/tmp.e1WyMjq8Ox ++ rm /tmp/tmp.KgXh68uieN /tmp/tmp.e1WyMjq8Ox ++ return 0 + local client_container=psmdb-client-696897d69b-7zv7m + kubectl_bin exec psmdb-client-696897d69b-7zv7m -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.pitr-physical-23919.svc.cluster.local:27017/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.gg4NwNT1ql ++ mktemp + local LAST_ERR=/tmp/tmp.mFXHqICyUj + local exit_status=0 + local 
timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-7zv7m -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.pitr-physical-23919.svc.cluster.local:27017/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.gg4NwNT1ql Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-mongos.pitr-physical-23919.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb Implicit session: session { "id" : UUID("e07f15a4-922d-4de0-a4f2-3c15be08045b") } Percona Server for MongoDB server version: v8.0.16-5 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.mFXHqICyUj + rm /tmp/tmp.gg4NwNT1ql /tmp/tmp.mFXHqICyUj + return 0 + sleep 0 + compare_mongos_cmd find myApp:myPass@some-name-mongos.pitr-physical-23919 + local command=find + local uri=myApp:myPass@some-name-mongos.pitr-physical-23919 + local postfix= + local suffix= + local database=myApp + local collection=test + local port=27017 + local tls=false + [[ false == \t\r\u\e ]] + mongos_command=run_mongos + run_mongos 'use myApp\n db.test.find()' myApp:myPass@some-name-mongos.pitr-physical-23919 mongodb '' '' 27017 + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-mongos.pitr-physical-23919 + egrep -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local port=27017 + local mongo_bin=mongo + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ echo .svc.cluster.local ++ awk -F: '{print $2}' egrep: warning: egrep is obsolescent; using grep -E + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.HzlexF0xKy +++ mktemp ++ local LAST_ERR=/tmp/tmp.sSSxxnvloi ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.HzlexF0xKy ++ cat /tmp/tmp.sSSxxnvloi ++ rm /tmp/tmp.HzlexF0xKy /tmp/tmp.sSSxxnvloi ++ return 0 + local client_container=psmdb-client-696897d69b-7zv7m + kubectl_bin exec psmdb-client-696897d69b-7zv7m -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.pitr-physical-23919.svc.cluster.local:27017/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.z6Z6fC0Xpg ++ mktemp + local LAST_ERR=/tmp/tmp.7cyGQCOWbi + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-7zv7m -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.pitr-physical-23919.svc.cluster.local:27017/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.z6Z6fC0Xpg + cat /tmp/tmp.7cyGQCOWbi + rm /tmp/tmp.z6Z6fC0Xpg /tmp/tmp.7cyGQCOWbi + return 0 + [[ 0 -eq 0 ]] + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/pitr-physical/compare/find.json /tmp/tmp.WRgTj3fQjY/find + desc 'wait for pbm agents to 
start' + set +o xtrace ----------------------------------------------------------------------------------- wait for pbm agents to start ----------------------------------------------------------------------------------- + wait_backup_agent some-name-rs0-0 + local agent_pod=some-name-rs0-0 + set +o xtrace waiting for pbm-agent to be ready in some-name-rs0-0...2025-12-04T20:32:32.000+0000 I listening for the commands + wait_backup_agent some-name-rs0-1 + local agent_pod=some-name-rs0-1 + set +o xtrace waiting for pbm-agent to be ready in some-name-rs0-1...2025-12-04T20:32:29.000+0000 I listening for the commands + wait_backup_agent some-name-rs0-2 + local agent_pod=some-name-rs0-2 + set +o xtrace waiting for pbm-agent to be ready in some-name-rs0-2...2025-12-04T20:32:31.000+0000 I listening for the commands + wait_backup_agent some-name-rs1-0 + local agent_pod=some-name-rs1-0 + set +o xtrace waiting for pbm-agent to be ready in some-name-rs1-0...2025-12-04T20:32:33.000+0000 I listening for the commands + wait_backup_agent some-name-rs1-1 + local agent_pod=some-name-rs1-1 + set +o xtrace waiting for pbm-agent to be ready in some-name-rs1-1...2025-12-04T20:32:32.000+0000 I listening for the commands + wait_backup_agent some-name-rs1-2 + local agent_pod=some-name-rs1-2 + set +o xtrace waiting for pbm-agent to be ready in some-name-rs1-2...2025-12-04T20:32:31.000+0000 I listening for the commands + wait_backup_agent some-name-rs2-0 + local agent_pod=some-name-rs2-0 + set +o xtrace waiting for pbm-agent to be ready in some-name-rs2-0...2025-12-04T20:32:30.000+0000 I listening for the commands + wait_backup_agent some-name-rs2-1 + local agent_pod=some-name-rs2-1 + set +o xtrace waiting for pbm-agent to be ready in some-name-rs2-1...2025-12-04T20:32:30.000+0000 I listening for the commands + wait_backup_agent some-name-rs2-2 + local agent_pod=some-name-rs2-2 + set +o xtrace waiting for pbm-agent to be ready in some-name-rs2-2...2025-12-04T20:32:32.000+0000 I listening for the commands + log 'Sleeping for 360 seconds to avoid PBM-1265' + set +o xtrace [2025-12-04T20:33:17+0000] Sleeping for 360 seconds to avoid PBM-1265 + sleep 360 + backup_name_minio=backup-minio + run_backup backup-minio 2 physical + local name=backup-minio + local idx=2 + local type=physical + desc 'run backup backup-minio-2' + set +o xtrace ----------------------------------------------------------------------------------- run backup backup-minio-2 ----------------------------------------------------------------------------------- + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/pitr-physical/conf/backup-minio.yml + /usr/sbin/sed -e 's/type:/type: physical/' + /usr/sbin/sed -e 's/name:/name: backup-minio-2/' + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.NUITRFaNxG ++ mktemp + local LAST_ERR=/tmp/tmp.piFXbPw2Hp + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.NUITRFaNxG perconaservermongodbbackup.psmdb.percona.com/backup-minio-2 created + cat /tmp/tmp.piFXbPw2Hp + rm /tmp/tmp.NUITRFaNxG /tmp/tmp.piFXbPw2Hp + return 0 + wait_backup backup-minio-2 + local backup_name=backup-minio-2 + local target_state=ready + set +o xtrace waiting for backup-minio-2 to reach ready state.........OK + sleep 5 + write_document -2nd 130 + local cmp_postfix=-2nd + local sleep_value=130 + log 'write initial data, read from all' + set +o xtrace [2025-12-04T20:39:44+0000] write initial data, 
read from all + run_mongos 'use myApp\n db.test.insert({ x: 100500 })' myApp:myPass@some-name-mongos.pitr-physical-23919 + local 'command=use myApp\n db.test.insert({ x: 100500 })' + local uri=myApp:myPass@some-name-mongos.pitr-physical-23919 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local port=27017 + local mongo_bin=mongo ++ echo .svc.cluster.local ++ awk -F: '{print $2}' + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.6iZEW0WUdc +++ mktemp ++ local LAST_ERR=/tmp/tmp.Jiwbr3axt4 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.6iZEW0WUdc ++ cat /tmp/tmp.Jiwbr3axt4 ++ rm /tmp/tmp.6iZEW0WUdc /tmp/tmp.Jiwbr3axt4 ++ return 0 + local client_container=psmdb-client-696897d69b-7zv7m + kubectl_bin exec psmdb-client-696897d69b-7zv7m -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.pitr-physical-23919.svc.cluster.local:27017/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.A4T3PyCftK ++ mktemp + local LAST_ERR=/tmp/tmp.IFdsdOX1KW + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-7zv7m -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.pitr-physical-23919.svc.cluster.local:27017/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.A4T3PyCftK Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-mongos.pitr-physical-23919.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb Implicit session: session { "id" : UUID("61501250-e917-4147-a195-5602f2f05323") } Percona Server for MongoDB server version: v8.0.16-5 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.IFdsdOX1KW + rm /tmp/tmp.A4T3PyCftK /tmp/tmp.IFdsdOX1KW + return 0 + sleep 130 + compare_mongos_cmd find myApp:myPass@some-name-mongos.pitr-physical-23919 -2nd + local command=find + local uri=myApp:myPass@some-name-mongos.pitr-physical-23919 + local postfix=-2nd + local suffix= + local database=myApp + local collection=test + local port=27017 + local tls=false + [[ false == \t\r\u\e ]] + mongos_command=run_mongos + run_mongos 'use myApp\n db.test.find()' myApp:myPass@some-name-mongos.pitr-physical-23919 mongodb '' '' 27017 + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-mongos.pitr-physical-23919 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local port=27017 + egrep -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' + local mongo_bin=mongo + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ echo .svc.cluster.local ++ awk -F: '{print $2}' egrep: warning: egrep is obsolescent; using grep -E + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ 
mktemp ++ local LAST_OUT=/tmp/tmp.fJnHTLDIIT +++ mktemp ++ local LAST_ERR=/tmp/tmp.PxUnF6PMQL ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.fJnHTLDIIT ++ cat /tmp/tmp.PxUnF6PMQL ++ rm /tmp/tmp.fJnHTLDIIT /tmp/tmp.PxUnF6PMQL ++ return 0 + local client_container=psmdb-client-696897d69b-7zv7m + kubectl_bin exec psmdb-client-696897d69b-7zv7m -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.pitr-physical-23919.svc.cluster.local:27017/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.v5GnRBddZL ++ mktemp + local LAST_ERR=/tmp/tmp.2NMK0XTiDt + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-7zv7m -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.pitr-physical-23919.svc.cluster.local:27017/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.v5GnRBddZL + cat /tmp/tmp.2NMK0XTiDt + rm /tmp/tmp.v5GnRBddZL /tmp/tmp.2NMK0XTiDt + return 0 + [[ 0 -eq 0 ]] + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/pitr-physical/compare/find-2nd.json /tmp/tmp.WRgTj3fQjY/find-2nd ++ date +%s + last_write=1764880920 ++ get_latest_oplog_chunk_ts some-name ++ local cluster=some-name +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.WV1OHY2mmj ++++ mktemp +++ local LAST_ERR=/tmp/tmp.bL29P5DwAV +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.WV1OHY2mmj +++ cat /tmp/tmp.bL29P5DwAV +++ rm /tmp/tmp.WV1OHY2mmj /tmp/tmp.bL29P5DwAV +++ return 0 ++ echo 1764880911 + last_chunk=1764880911 + log 'last write: 1764880920' + set +o xtrace [2025-12-04T20:42:02+0000] last write: 1764880920 + log 'last chunk: 1764880911' + set +o xtrace [2025-12-04T20:42:02+0000] last chunk: 1764880911 + retries=0 + [[ 1764880911 -gt 1764880920 ]] + [[ 0 -gt 30 ]] ++ get_latest_oplog_chunk_ts some-name ++ local cluster=some-name +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.XsqWY1ZydS ++++ mktemp +++ local LAST_ERR=/tmp/tmp.iiYdw80y5z +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.XsqWY1ZydS +++ cat /tmp/tmp.iiYdw80y5z +++ rm /tmp/tmp.XsqWY1ZydS /tmp/tmp.iiYdw80y5z +++ return 0 ++ echo 1764880911 + last_chunk=1764880911 + retries=1 ++ format_date 1764880911 ++ local timestamp=1764880911 +++ TZ=UTC +++ /usr/sbin/date -d@1764880911 '+%Y-%m-%d %H:%M:%S' ++ echo 2025-12-04 20:41:51 ++ format_date 1764880920 ++ local timestamp=1764880920 +++ TZ=UTC +++ /usr/sbin/date -d@1764880920 '+%Y-%m-%d %H:%M:%S' ++ echo 2025-12-04 20:42:00 + log 'Waiting for last oplog chunk (2025-12-04 20:41:51) to be greater than last write (2025-12-04 20:42:00)' + set +o xtrace [2025-12-04T20:42:04+0000] Waiting for 
last oplog chunk (2025-12-04 20:41:51) to be greater than last write (2025-12-04 20:42:00) + sleep 10 + [[ 1764880911 -gt 1764880920 ]] + [[ 1 -gt 30 ]] ++ get_latest_oplog_chunk_ts some-name ++ local cluster=some-name +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.bNZwNvmPAB ++++ mktemp +++ local LAST_ERR=/tmp/tmp.cp6aW28gou +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.bNZwNvmPAB +++ cat /tmp/tmp.cp6aW28gou +++ rm /tmp/tmp.bNZwNvmPAB /tmp/tmp.cp6aW28gou +++ return 0 ++ echo 1764880911 + last_chunk=1764880911 + retries=2 ++ format_date 1764880911 ++ local timestamp=1764880911 +++ TZ=UTC +++ /usr/sbin/date -d@1764880911 '+%Y-%m-%d %H:%M:%S' ++ echo 2025-12-04 20:41:51 ++ format_date 1764880920 ++ local timestamp=1764880920 +++ TZ=UTC +++ /usr/sbin/date -d@1764880920 '+%Y-%m-%d %H:%M:%S' ++ echo 2025-12-04 20:42:00 + log 'Waiting for last oplog chunk (2025-12-04 20:41:51) to be greater than last write (2025-12-04 20:42:00)' + set +o xtrace [2025-12-04T20:42:17+0000] Waiting for last oplog chunk (2025-12-04 20:41:51) to be greater than last write (2025-12-04 20:42:00) + sleep 10 + [[ 1764880911 -gt 1764880920 ]] + [[ 2 -gt 30 ]] ++ get_latest_oplog_chunk_ts some-name ++ local cluster=some-name +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.XmW8g0u8KX ++++ mktemp +++ local LAST_ERR=/tmp/tmp.SLJeJYUcmn +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.XmW8g0u8KX +++ cat /tmp/tmp.SLJeJYUcmn +++ rm /tmp/tmp.XmW8g0u8KX /tmp/tmp.SLJeJYUcmn +++ return 0 ++ echo 1764880911 + last_chunk=1764880911 + retries=3 ++ format_date 1764880911 ++ local timestamp=1764880911 +++ TZ=UTC +++ /usr/sbin/date -d@1764880911 '+%Y-%m-%d %H:%M:%S' ++ echo 2025-12-04 20:41:51 ++ format_date 1764880920 ++ local timestamp=1764880920 +++ TZ=UTC +++ /usr/sbin/date -d@1764880920 '+%Y-%m-%d %H:%M:%S' ++ echo 2025-12-04 20:42:00 + log 'Waiting for last oplog chunk (2025-12-04 20:41:51) to be greater than last write (2025-12-04 20:42:00)' + set +o xtrace [2025-12-04T20:42:29+0000] Waiting for last oplog chunk (2025-12-04 20:41:51) to be greater than last write (2025-12-04 20:42:00) + sleep 10 + [[ 1764880911 -gt 1764880920 ]] + [[ 3 -gt 30 ]] ++ get_latest_oplog_chunk_ts some-name ++ local cluster=some-name +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.5weiXqGzHz ++++ mktemp +++ local LAST_ERR=/tmp/tmp.5y3gCKe7N2 +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.5weiXqGzHz +++ cat /tmp/tmp.5y3gCKe7N2 +++ rm /tmp/tmp.5weiXqGzHz /tmp/tmp.5y3gCKe7N2 +++ return 0 ++ echo 1764880911 + last_chunk=1764880911 + retries=4 ++ format_date 1764880911 ++ local 
timestamp=1764880911 +++ TZ=UTC +++ /usr/sbin/date -d@1764880911 '+%Y-%m-%d %H:%M:%S' ++ echo 2025-12-04 20:41:51 ++ format_date 1764880920 ++ local timestamp=1764880920 +++ TZ=UTC +++ /usr/sbin/date -d@1764880920 '+%Y-%m-%d %H:%M:%S' ++ echo 2025-12-04 20:42:00 + log 'Waiting for last oplog chunk (2025-12-04 20:41:51) to be greater than last write (2025-12-04 20:42:00)' + set +o xtrace [2025-12-04T20:42:41+0000] Waiting for last oplog chunk (2025-12-04 20:41:51) to be greater than last write (2025-12-04 20:42:00) + sleep 10 + [[ 1764880911 -gt 1764880920 ]] + [[ 4 -gt 30 ]] ++ get_latest_oplog_chunk_ts some-name ++ local cluster=some-name +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.l7zHlnd23s ++++ mktemp +++ local LAST_ERR=/tmp/tmp.0a4AHgtz4S +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.l7zHlnd23s +++ cat /tmp/tmp.0a4AHgtz4S +++ rm /tmp/tmp.l7zHlnd23s /tmp/tmp.0a4AHgtz4S +++ return 0 ++ echo 1764880911 + last_chunk=1764880911 + retries=5 ++ format_date 1764880911 ++ local timestamp=1764880911 +++ TZ=UTC +++ /usr/sbin/date -d@1764880911 '+%Y-%m-%d %H:%M:%S' ++ echo 2025-12-04 20:41:51 ++ format_date 1764880920 ++ local timestamp=1764880920 +++ TZ=UTC +++ /usr/sbin/date -d@1764880920 '+%Y-%m-%d %H:%M:%S' ++ echo 2025-12-04 20:42:00 + log 'Waiting for last oplog chunk (2025-12-04 20:41:51) to be greater than last write (2025-12-04 20:42:00)' + set +o xtrace [2025-12-04T20:42:53+0000] Waiting for last oplog chunk (2025-12-04 20:41:51) to be greater than last write (2025-12-04 20:42:00) + sleep 10 + [[ 1764880911 -gt 1764880920 ]] + [[ 5 -gt 30 ]] ++ get_latest_oplog_chunk_ts some-name ++ local cluster=some-name +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.vuiAvbjvwd ++++ mktemp +++ local LAST_ERR=/tmp/tmp.PgyojI5uOb +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.vuiAvbjvwd +++ cat /tmp/tmp.PgyojI5uOb +++ rm /tmp/tmp.vuiAvbjvwd /tmp/tmp.PgyojI5uOb +++ return 0 ++ echo 1764880911 + last_chunk=1764880911 + retries=6 ++ format_date 1764880911 ++ local timestamp=1764880911 +++ TZ=UTC +++ /usr/sbin/date -d@1764880911 '+%Y-%m-%d %H:%M:%S' ++ echo 2025-12-04 20:41:51 ++ format_date 1764880920 ++ local timestamp=1764880920 +++ TZ=UTC +++ /usr/sbin/date -d@1764880920 '+%Y-%m-%d %H:%M:%S' ++ echo 2025-12-04 20:42:00 + log 'Waiting for last oplog chunk (2025-12-04 20:41:51) to be greater than last write (2025-12-04 20:42:00)' + set +o xtrace [2025-12-04T20:43:06+0000] Waiting for last oplog chunk (2025-12-04 20:41:51) to be greater than last write (2025-12-04 20:42:00) + sleep 10 + [[ 1764880911 -gt 1764880920 ]] + [[ 6 -gt 30 ]] ++ get_latest_oplog_chunk_ts some-name ++ local cluster=some-name +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.GArVB9CgYx ++++ mktemp +++ local LAST_ERR=/tmp/tmp.VFw8pj4vxo +++ local exit_status=0 
+++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.GArVB9CgYx +++ cat /tmp/tmp.VFw8pj4vxo +++ rm /tmp/tmp.GArVB9CgYx /tmp/tmp.VFw8pj4vxo +++ return 0 ++ echo 1764880911 + last_chunk=1764880911 + retries=7 ++ format_date 1764880911 ++ local timestamp=1764880911 +++ TZ=UTC +++ /usr/sbin/date -d@1764880911 '+%Y-%m-%d %H:%M:%S' ++ echo 2025-12-04 20:41:51 ++ format_date 1764880920 ++ local timestamp=1764880920 +++ TZ=UTC +++ /usr/sbin/date -d@1764880920 '+%Y-%m-%d %H:%M:%S' ++ echo 2025-12-04 20:42:00 + log 'Waiting for last oplog chunk (2025-12-04 20:41:51) to be greater than last write (2025-12-04 20:42:00)' + set +o xtrace [2025-12-04T20:43:18+0000] Waiting for last oplog chunk (2025-12-04 20:41:51) to be greater than last write (2025-12-04 20:42:00) + sleep 10 + [[ 1764880911 -gt 1764880920 ]] + [[ 7 -gt 30 ]] ++ get_latest_oplog_chunk_ts some-name ++ local cluster=some-name +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.UT7Ns4IIWK ++++ mktemp +++ local LAST_ERR=/tmp/tmp.hmuifYj0Q1 +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.UT7Ns4IIWK +++ cat /tmp/tmp.hmuifYj0Q1 +++ rm /tmp/tmp.UT7Ns4IIWK /tmp/tmp.hmuifYj0Q1 +++ return 0 ++ echo 1764880911 + last_chunk=1764880911 + retries=8 ++ format_date 1764880911 ++ local timestamp=1764880911 +++ TZ=UTC +++ /usr/sbin/date -d@1764880911 '+%Y-%m-%d %H:%M:%S' ++ echo 2025-12-04 20:41:51 ++ format_date 1764880920 ++ local timestamp=1764880920 +++ TZ=UTC +++ /usr/sbin/date -d@1764880920 '+%Y-%m-%d %H:%M:%S' ++ echo 2025-12-04 20:42:00 + log 'Waiting for last oplog chunk (2025-12-04 20:41:51) to be greater than last write (2025-12-04 20:42:00)' + set +o xtrace [2025-12-04T20:43:30+0000] Waiting for last oplog chunk (2025-12-04 20:41:51) to be greater than last write (2025-12-04 20:42:00) + sleep 10 + [[ 1764880911 -gt 1764880920 ]] + [[ 8 -gt 30 ]] ++ get_latest_oplog_chunk_ts some-name ++ local cluster=some-name +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.eKinf723VN ++++ mktemp +++ local LAST_ERR=/tmp/tmp.htYiVGcixH +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.eKinf723VN +++ cat /tmp/tmp.htYiVGcixH +++ rm /tmp/tmp.eKinf723VN /tmp/tmp.htYiVGcixH +++ return 0 ++ echo 1764880911 + last_chunk=1764880911 + retries=9 ++ format_date 1764880911 ++ local timestamp=1764880911 +++ TZ=UTC +++ /usr/sbin/date -d@1764880911 '+%Y-%m-%d %H:%M:%S' ++ echo 2025-12-04 20:41:51 ++ format_date 1764880920 ++ local timestamp=1764880920 +++ TZ=UTC +++ /usr/sbin/date -d@1764880920 '+%Y-%m-%d %H:%M:%S' ++ echo 2025-12-04 20:42:00 + log 'Waiting for last oplog chunk (2025-12-04 20:41:51) to be greater than last write (2025-12-04 20:42:00)' + set +o xtrace [2025-12-04T20:43:42+0000] Waiting for last oplog chunk (2025-12-04 20:41:51) to be greater than last write 
(2025-12-04 20:42:00) + sleep 10 + [[ 1764880911 -gt 1764880920 ]] + [[ 9 -gt 30 ]] ++ get_latest_oplog_chunk_ts some-name ++ local cluster=some-name +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.dHSnJhNsqr ++++ mktemp +++ local LAST_ERR=/tmp/tmp.sHxyRx9TTP +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.dHSnJhNsqr +++ cat /tmp/tmp.sHxyRx9TTP +++ rm /tmp/tmp.dHSnJhNsqr /tmp/tmp.sHxyRx9TTP +++ return 0 ++ echo 1764880911 + last_chunk=1764880911 + retries=10 ++ format_date 1764880911 ++ local timestamp=1764880911 +++ TZ=UTC +++ /usr/sbin/date -d@1764880911 '+%Y-%m-%d %H:%M:%S' ++ echo 2025-12-04 20:41:51 ++ format_date 1764880920 ++ local timestamp=1764880920 +++ TZ=UTC +++ /usr/sbin/date -d@1764880920 '+%Y-%m-%d %H:%M:%S' ++ echo 2025-12-04 20:42:00 + log 'Waiting for last oplog chunk (2025-12-04 20:41:51) to be greater than last write (2025-12-04 20:42:00)' + set +o xtrace [2025-12-04T20:43:55+0000] Waiting for last oplog chunk (2025-12-04 20:41:51) to be greater than last write (2025-12-04 20:42:00) + sleep 10 + [[ 1764880911 -gt 1764880920 ]] + [[ 10 -gt 30 ]] ++ get_latest_oplog_chunk_ts some-name ++ local cluster=some-name +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.ejNTpYmTdJ ++++ mktemp +++ local LAST_ERR=/tmp/tmp.ufe20fBBtw +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.ejNTpYmTdJ +++ cat /tmp/tmp.ufe20fBBtw +++ rm /tmp/tmp.ejNTpYmTdJ /tmp/tmp.ufe20fBBtw +++ return 0 ++ echo 1764880919 + last_chunk=1764880919 + retries=11 ++ format_date 1764880919 ++ local timestamp=1764880919 +++ TZ=UTC +++ /usr/sbin/date -d@1764880919 '+%Y-%m-%d %H:%M:%S' ++ echo 2025-12-04 20:41:59 ++ format_date 1764880920 ++ local timestamp=1764880920 +++ TZ=UTC +++ /usr/sbin/date -d@1764880920 '+%Y-%m-%d %H:%M:%S' ++ echo 2025-12-04 20:42:00 + log 'Waiting for last oplog chunk (2025-12-04 20:41:59) to be greater than last write (2025-12-04 20:42:00)' + set +o xtrace [2025-12-04T20:44:07+0000] Waiting for last oplog chunk (2025-12-04 20:41:59) to be greater than last write (2025-12-04 20:42:00) + sleep 10 + [[ 1764880919 -gt 1764880920 ]] + [[ 11 -gt 30 ]] ++ get_latest_oplog_chunk_ts some-name ++ local cluster=some-name +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.HCuObwHmRg ++++ mktemp +++ local LAST_ERR=/tmp/tmp.ezlkiFK0Lg +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.HCuObwHmRg +++ cat /tmp/tmp.ezlkiFK0Lg +++ rm /tmp/tmp.HCuObwHmRg /tmp/tmp.ezlkiFK0Lg +++ return 0 ++ echo 1764881031 + last_chunk=1764881031 + retries=12 ++ format_date 1764881031 ++ local timestamp=1764881031 +++ TZ=UTC +++ /usr/sbin/date -d@1764881031 '+%Y-%m-%d 
%H:%M:%S' ++ echo 2025-12-04 20:43:51 ++ format_date 1764880920 ++ local timestamp=1764880920 +++ TZ=UTC +++ /usr/sbin/date -d@1764880920 '+%Y-%m-%d %H:%M:%S' ++ echo 2025-12-04 20:42:00 + log 'Waiting for last oplog chunk (2025-12-04 20:43:51) to be greater than last write (2025-12-04 20:42:00)' + set +o xtrace [2025-12-04T20:44:19+0000] Waiting for last oplog chunk (2025-12-04 20:43:51) to be greater than last write (2025-12-04 20:42:00) + sleep 10 + [[ 1764881031 -gt 1764880920 ]] + sleep 10 + check_recovery backup-minio-2 date 1764881031 -2nd some-name + local backup_name=backup-minio-2 + local restore_type=date + local restore_date=1764881031 + local cmp_postfix=-2nd + local cluster_name=some-name + local backupSource= ++ get_latest_oplog_chunk_ts some-name ++ local cluster=some-name +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.Sg9GrETNnY ++++ mktemp +++ local LAST_ERR=/tmp/tmp.el9WMKwaW0 +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.Sg9GrETNnY +++ cat /tmp/tmp.el9WMKwaW0 +++ rm /tmp/tmp.Sg9GrETNnY /tmp/tmp.el9WMKwaW0 +++ return 0 ++ echo 1764881031 + local latest_ts=1764881031 + desc 'write more data before restore by date' + set +o xtrace ----------------------------------------------------------------------------------- write more data before restore by date ----------------------------------------------------------------------------------- + run_mongos 'use myApp\n db.test.insert({ x: 100501 })' myApp:myPass@some-name-mongos.pitr-physical-23919 + local 'command=use myApp\n db.test.insert({ x: 100501 })' + local uri=myApp:myPass@some-name-mongos.pitr-physical-23919 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local port=27017 + local mongo_bin=mongo ++ echo .svc.cluster.local ++ awk -F: '{print $2}' + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.eIAsqvITiK +++ mktemp ++ local LAST_ERR=/tmp/tmp.mfmD1xiCSN ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.eIAsqvITiK ++ cat /tmp/tmp.mfmD1xiCSN ++ rm /tmp/tmp.eIAsqvITiK /tmp/tmp.mfmD1xiCSN ++ return 0 + local client_container=psmdb-client-696897d69b-7zv7m + kubectl_bin exec psmdb-client-696897d69b-7zv7m -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100501 })\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.pitr-physical-23919.svc.cluster.local:27017/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.8qJI4kkHti ++ mktemp + local LAST_ERR=/tmp/tmp.CuC84MaaKo + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-7zv7m -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100501 })\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.pitr-physical-23919.svc.cluster.local:27017/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.8qJI4kkHti Percona Server for MongoDB shell version v4.4.29-28 connecting to: 
mongodb://some-name-mongos.pitr-physical-23919.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb Implicit session: session { "id" : UUID("283bc73e-1b78-469e-840b-a43bfa1455be") } Percona Server for MongoDB server version: v8.0.16-5 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.CuC84MaaKo + rm /tmp/tmp.8qJI4kkHti /tmp/tmp.CuC84MaaKo + return 0 + [[ -n 1764881031 ]] ++ format_date 1764881031 ++ local timestamp=1764881031 +++ TZ=UTC +++ /usr/sbin/date -d@1764881031 '+%Y-%m-%d %H:%M:%S' ++ echo 2025-12-04 20:43:51 + desc 'Restoring to time 2025-12-04 20:43:51' + set +o xtrace ----------------------------------------------------------------------------------- Restoring to time 2025-12-04 20:43:51 ----------------------------------------------------------------------------------- + retries=0 + [[ 1764881031 -gt 1764881031 ]] + [[ 0 -gt 30 ]] ++ get_latest_oplog_chunk_ts some-name ++ local cluster=some-name +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.06zeL1SLEs ++++ mktemp +++ local LAST_ERR=/tmp/tmp.l2uxUwcSmV +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.06zeL1SLEs +++ cat /tmp/tmp.l2uxUwcSmV +++ rm /tmp/tmp.06zeL1SLEs /tmp/tmp.l2uxUwcSmV +++ return 0 ++ echo 1764881031 + latest_ts=1764881031 + retries=1 ++ format_date 1764881031 ++ local timestamp=1764881031 +++ TZ=UTC +++ /usr/sbin/date -d@1764881031 '+%Y-%m-%d %H:%M:%S' ++ echo 2025-12-04 20:43:51 ++ format_date 1764881031 ++ local timestamp=1764881031 +++ TZ=UTC +++ /usr/sbin/date -d@1764881031 '+%Y-%m-%d %H:%M:%S' ++ echo 2025-12-04 20:43:51 + echo 'Waiting for last oplog chunk (2025-12-04 20:43:51) to be greater than restore target (2025-12-04 20:43:51)' Waiting for last oplog chunk (2025-12-04 20:43:51) to be greater than restore target (2025-12-04 20:43:51) + sleep 10 + [[ 1764881031 -gt 1764881031 ]] + [[ 1 -gt 30 ]] ++ get_latest_oplog_chunk_ts some-name ++ local cluster=some-name +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.7BYSAmtrDY ++++ mktemp +++ local LAST_ERR=/tmp/tmp.lHegl0H1dV +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.7BYSAmtrDY +++ cat /tmp/tmp.lHegl0H1dV +++ rm /tmp/tmp.7BYSAmtrDY /tmp/tmp.lHegl0H1dV +++ return 0 ++ echo 1764881031 + latest_ts=1764881031 + retries=2 ++ format_date 1764881031 ++ local timestamp=1764881031 +++ TZ=UTC +++ /usr/sbin/date -d@1764881031 '+%Y-%m-%d %H:%M:%S' ++ echo 2025-12-04 20:43:51 ++ format_date 1764881031 ++ local timestamp=1764881031 +++ TZ=UTC +++ /usr/sbin/date -d@1764881031 '+%Y-%m-%d %H:%M:%S' ++ echo 2025-12-04 20:43:51 + echo 'Waiting for last oplog chunk (2025-12-04 20:43:51) to be greater than restore target (2025-12-04 20:43:51)' Waiting for last oplog chunk (2025-12-04 20:43:51) to be greater than restore target (2025-12-04 20:43:51) + sleep 10 + [[ 1764881031 -gt 1764881031 ]] + [[ 2 -gt 30 ]] ++ get_latest_oplog_chunk_ts 
some-name ++ local cluster=some-name +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.DE9CtLHEu3 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.yY7I0g8ZiE +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.DE9CtLHEu3 +++ cat /tmp/tmp.yY7I0g8ZiE +++ rm /tmp/tmp.DE9CtLHEu3 /tmp/tmp.yY7I0g8ZiE +++ return 0 ++ echo 1764881031 + latest_ts=1764881031 + retries=3 ++ format_date 1764881031 ++ local timestamp=1764881031 +++ TZ=UTC +++ /usr/sbin/date -d@1764881031 '+%Y-%m-%d %H:%M:%S' ++ echo 2025-12-04 20:43:51 ++ format_date 1764881031 ++ local timestamp=1764881031 +++ TZ=UTC +++ /usr/sbin/date -d@1764881031 '+%Y-%m-%d %H:%M:%S' ++ echo 2025-12-04 20:43:51 + echo 'Waiting for last oplog chunk (2025-12-04 20:43:51) to be greater than restore target (2025-12-04 20:43:51)' Waiting for last oplog chunk (2025-12-04 20:43:51) to be greater than restore target (2025-12-04 20:43:51) + sleep 10 + [[ 1764881031 -gt 1764881031 ]] + [[ 3 -gt 30 ]] ++ get_latest_oplog_chunk_ts some-name ++ local cluster=some-name +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.ad3zJckYyc ++++ mktemp +++ local LAST_ERR=/tmp/tmp.HdSab3s9Lf +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.ad3zJckYyc +++ cat /tmp/tmp.HdSab3s9Lf +++ rm /tmp/tmp.ad3zJckYyc /tmp/tmp.HdSab3s9Lf +++ return 0 ++ echo 1764881031 + latest_ts=1764881031 + retries=4 ++ format_date 1764881031 ++ local timestamp=1764881031 +++ TZ=UTC +++ /usr/sbin/date -d@1764881031 '+%Y-%m-%d %H:%M:%S' ++ echo 2025-12-04 20:43:51 ++ format_date 1764881031 ++ local timestamp=1764881031 +++ TZ=UTC +++ /usr/sbin/date -d@1764881031 '+%Y-%m-%d %H:%M:%S' ++ echo 2025-12-04 20:43:51 + echo 'Waiting for last oplog chunk (2025-12-04 20:43:51) to be greater than restore target (2025-12-04 20:43:51)' Waiting for last oplog chunk (2025-12-04 20:43:51) to be greater than restore target (2025-12-04 20:43:51) + sleep 10 + [[ 1764881031 -gt 1764881031 ]] + [[ 4 -gt 30 ]] ++ get_latest_oplog_chunk_ts some-name ++ local cluster=some-name +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.tlOeDuIzO8 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.BhQoYBTZNf +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.tlOeDuIzO8 +++ cat /tmp/tmp.BhQoYBTZNf +++ rm /tmp/tmp.tlOeDuIzO8 /tmp/tmp.BhQoYBTZNf +++ return 0 ++ echo 1764881031 + latest_ts=1764881031 + retries=5 ++ format_date 1764881031 ++ local timestamp=1764881031 +++ TZ=UTC +++ /usr/sbin/date -d@1764881031 '+%Y-%m-%d %H:%M:%S' ++ echo 2025-12-04 20:43:51 ++ format_date 1764881031 ++ local timestamp=1764881031 +++ TZ=UTC +++ /usr/sbin/date -d@1764881031 '+%Y-%m-%d %H:%M:%S' ++ echo 2025-12-04 20:43:51 + echo 
'Waiting for last oplog chunk (2025-12-04 20:43:51) to be greater than restore target (2025-12-04 20:43:51)' Waiting for last oplog chunk (2025-12-04 20:43:51) to be greater than restore target (2025-12-04 20:43:51) + sleep 10 + [[ 1764881031 -gt 1764881031 ]] + [[ 5 -gt 30 ]] ++ get_latest_oplog_chunk_ts some-name ++ local cluster=some-name +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.DJzcGHArRy ++++ mktemp +++ local LAST_ERR=/tmp/tmp.HsfjXWMIHK +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.DJzcGHArRy +++ cat /tmp/tmp.HsfjXWMIHK +++ rm /tmp/tmp.DJzcGHArRy /tmp/tmp.HsfjXWMIHK +++ return 0 ++ echo 1764881031 + latest_ts=1764881031 + retries=6 ++ format_date 1764881031 ++ local timestamp=1764881031 +++ TZ=UTC +++ /usr/sbin/date -d@1764881031 '+%Y-%m-%d %H:%M:%S' ++ echo 2025-12-04 20:43:51 ++ format_date 1764881031 ++ local timestamp=1764881031 +++ TZ=UTC +++ /usr/sbin/date -d@1764881031 '+%Y-%m-%d %H:%M:%S' ++ echo 2025-12-04 20:43:51 + echo 'Waiting for last oplog chunk (2025-12-04 20:43:51) to be greater than restore target (2025-12-04 20:43:51)' Waiting for last oplog chunk (2025-12-04 20:43:51) to be greater than restore target (2025-12-04 20:43:51) + sleep 10 + [[ 1764881031 -gt 1764881031 ]] + [[ 6 -gt 30 ]] ++ get_latest_oplog_chunk_ts some-name ++ local cluster=some-name +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.QgQdZipyAU ++++ mktemp +++ local LAST_ERR=/tmp/tmp.9vjFVua9u5 +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.QgQdZipyAU +++ cat /tmp/tmp.9vjFVua9u5 +++ rm /tmp/tmp.QgQdZipyAU /tmp/tmp.9vjFVua9u5 +++ return 0 ++ echo 1764881031 + latest_ts=1764881031 + retries=7 ++ format_date 1764881031 ++ local timestamp=1764881031 +++ TZ=UTC +++ /usr/sbin/date -d@1764881031 '+%Y-%m-%d %H:%M:%S' ++ echo 2025-12-04 20:43:51 ++ format_date 1764881031 ++ local timestamp=1764881031 +++ TZ=UTC +++ /usr/sbin/date -d@1764881031 '+%Y-%m-%d %H:%M:%S' ++ echo 2025-12-04 20:43:51 + echo 'Waiting for last oplog chunk (2025-12-04 20:43:51) to be greater than restore target (2025-12-04 20:43:51)' Waiting for last oplog chunk (2025-12-04 20:43:51) to be greater than restore target (2025-12-04 20:43:51) + sleep 10 + [[ 1764881031 -gt 1764881031 ]] + [[ 7 -gt 30 ]] ++ get_latest_oplog_chunk_ts some-name ++ local cluster=some-name +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.dKBjXjPDwr ++++ mktemp +++ local LAST_ERR=/tmp/tmp.6VQUXy2Ok3 +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.dKBjXjPDwr +++ cat /tmp/tmp.6VQUXy2Ok3 +++ rm /tmp/tmp.dKBjXjPDwr /tmp/tmp.6VQUXy2Ok3 +++ return 0 ++ echo 1764881151 + latest_ts=1764881151 + 
retries=8 ++ format_date 1764881151 ++ local timestamp=1764881151 +++ TZ=UTC +++ /usr/sbin/date -d@1764881151 '+%Y-%m-%d %H:%M:%S' ++ echo 2025-12-04 20:45:51 ++ format_date 1764881031 ++ local timestamp=1764881031 +++ TZ=UTC +++ /usr/sbin/date -d@1764881031 '+%Y-%m-%d %H:%M:%S' ++ echo 2025-12-04 20:43:51 + echo 'Waiting for last oplog chunk (2025-12-04 20:45:51) to be greater than restore target (2025-12-04 20:43:51)' Waiting for last oplog chunk (2025-12-04 20:45:51) to be greater than restore target (2025-12-04 20:43:51) + sleep 10 + [[ 1764881151 -gt 1764881031 ]] + '[' -z '' ']' + desc 'check restore by date' + set +o xtrace ----------------------------------------------------------------------------------- check restore by date ----------------------------------------------------------------------------------- + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/pitr-physical/conf/restore.yml + /usr/sbin/sed -e 's/name:/name: restore-backup-minio-2/' + /usr/sbin/sed -e 's/backupName:/backupName: backup-minio-2/' + /usr/sbin/sed -e /backupSource/,+8d + /usr/sbin/sed -e 's/pitrType:/type: date/' + '[' -z 1764881031 ']' + kubectl_bin apply -f - ++ mktemp ++ format_date 1764881031 ++ local timestamp=1764881031 +++ TZ=UTC +++ /usr/sbin/date -d@1764881031 '+%Y-%m-%d %H:%M:%S' + local LAST_OUT=/tmp/tmp.OT5Uodnu3R ++ mktemp ++ echo 2025-12-04 20:43:51 + local LAST_ERR=/tmp/tmp.Erlv4YkrOH + local exit_status=0 + local timeout=4 + /usr/sbin/sed -e 's/date:/date: 2025-12-04 20:43:51/' ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.OT5Uodnu3R perconaservermongodbrestore.psmdb.percona.com/restore-backup-minio-2 created + cat /tmp/tmp.Erlv4YkrOH + rm /tmp/tmp.OT5Uodnu3R /tmp/tmp.Erlv4YkrOH + return 0 + wait_restore backup-minio-2 some-name requested 0 1200 + local backup_name=backup-minio-2 + local cluster_name=some-name + local target_state=requested + local wait_cluster_consistency=0 + local wait_time=1200 + local ok_if_ready=0 + set +o xtrace Waiting for the psmdb-restore/restore-backup-minio-2 object to be created.OK Waiting psmdb-restore/restore-backup-minio-2 to reach state "requested" ..............OK after 13 minutes + [[ 0 -eq 1 ]] + echo + wait_restore backup-minio-2 some-name ready 0 1600 + local backup_name=backup-minio-2 + local cluster_name=some-name + local target_state=ready + local wait_cluster_consistency=0 + local wait_time=1600 + local ok_if_ready=0 + set +o xtrace Waiting for the psmdb-restore/restore-backup-minio-2 object to be created.OK Waiting psmdb-restore/restore-backup-minio-2 to reach state "ready" ...OK after 2 minutes + [[ 0 -eq 1 ]] + echo + set -o xtrace + wait_for_running some-name-rs0 3 + local name=some-name-rs0 + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=some-name ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod some-name-rs0-0 + local pod=some-name-rs0-0 + set +o xtrace waiting for pod/some-name-rs0-0 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod some-name-rs0-1 + local pod=some-name-rs0-1 + set +o xtrace waiting for pod/some-name-rs0-1 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.eIrlo740hg +++ mktemp ++ local LAST_ERR=/tmp/tmp.52mgXKM1mr ++ local exit_status=0 ++ 
local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.eIrlo740hg ++ cat /tmp/tmp.52mgXKM1mr ++ rm /tmp/tmp.eIrlo740hg /tmp/tmp.52mgXKM1mr ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-rs0-2 + local pod=some-name-rs0-2 + set +o xtrace waiting for pod/some-name-rs0-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.fN14tjE9W1 +++ mktemp ++ local LAST_ERR=/tmp/tmp.ZF8gkF7nCX ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.fN14tjE9W1 ++ cat /tmp/tmp.ZF8gkF7nCX ++ rm /tmp/tmp.fN14tjE9W1 /tmp/tmp.ZF8gkF7nCX ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.KFOV98475m +++ mktemp ++ local LAST_ERR=/tmp/tmp.TuLM2v3Qnu ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.KFOV98475m ++ cat /tmp/tmp.TuLM2v3Qnu ++ rm /tmp/tmp.KFOV98475m /tmp/tmp.TuLM2v3Qnu ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness....................................................................... 
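For readers following the trace: the block above, and the cfg/mongos blocks that follow, are the wait_pod / wait_for_running helpers polling after the physical restore until every replica-set member reports Ready and overall cluster readiness settles. A minimal stand-alone sketch of that polling pattern is below; the helper names wait_pod_ready/wait_cluster_ready, the timeout budgets, and the reliance on .status.state of the psmdb resource are illustrative assumptions, not the test suite's actual implementation.

    # Sketch only -- not the e2e suite's helpers; names and budgets are assumed.
    wait_pod_ready() {
        local pod="$1" budget="${2:-300}" waited=0
        # Poll the pod's Ready condition, the same signal the traced wait_pod checks.
        until [[ "$(kubectl get pod "$pod" \
            -o 'jsonpath={.status.conditions[?(@.type=="Ready")].status}' 2>/dev/null)" == "True" ]]; do
            (( waited >= budget )) && { echo "pod/$pod not ready after ${budget}s" >&2; return 1; }
            sleep 1
            waited=$((waited + 1))
        done
        echo "pod/$pod ready"
    }

    wait_cluster_ready() {
        local cluster="$1" budget="${2:-900}" waited=0
        # Assumes the PerconaServerMongoDB resource reports .status.state (ready/initializing/error).
        until [[ "$(kubectl get psmdb "$cluster" -o 'jsonpath={.status.state}' 2>/dev/null)" == "ready" ]]; do
            (( waited >= budget )) && { echo "psmdb/$cluster not ready after ${budget}s" >&2; return 1; }
            sleep 5
            waited=$((waited + 5))
        done
        echo "psmdb/$cluster ready"
    }

    # Example: after a physical restore, wait for the rs0 members, then the whole cluster.
    for i in 0 1 2; do wait_pod_ready "some-name-rs0-$i"; done
    wait_cluster_ready some-name

The traced helpers differ in detail (they also query the arbiter.enabled, nonvoting.enabled and hidden.enabled flags per replset, as shown above), but the polling shape is the same.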
+ wait_for_running some-name-cfg 3 + local name=some-name-cfg + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=cfg + local cluster_name=some-name ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod some-name-cfg-0 + local pod=some-name-cfg-0 + set +o xtrace waiting for pod/some-name-cfg-0 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod some-name-cfg-1 + local pod=some-name-cfg-1 + set +o xtrace waiting for pod/some-name-cfg-1 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.zxKNZpaLpf +++ mktemp ++ local LAST_ERR=/tmp/tmp.NBIjzrAr6b ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.zxKNZpaLpf ++ cat /tmp/tmp.NBIjzrAr6b ++ rm /tmp/tmp.zxKNZpaLpf /tmp/tmp.NBIjzrAr6b ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-cfg-2 + local pod=some-name-cfg-2 + set +o xtrace waiting for pod/some-name-cfg-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.BTjffRTnUD +++ mktemp ++ local LAST_ERR=/tmp/tmp.GscmvCPdek ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.BTjffRTnUD ++ cat /tmp/tmp.GscmvCPdek ++ rm /tmp/tmp.BTjffRTnUD /tmp/tmp.GscmvCPdek ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.VVnVWiOCn9 +++ mktemp ++ local LAST_ERR=/tmp/tmp.kTcC3E0HaM ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.VVnVWiOCn9 ++ cat /tmp/tmp.kTcC3E0HaM ++ rm /tmp/tmp.VVnVWiOCn9 /tmp/tmp.kTcC3E0HaM ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness + wait_for_running some-name-mongos 3 + local name=some-name-mongos + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=mongos + local cluster_name=some-name ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod some-name-mongos-0 + local pod=some-name-mongos-0 + set +o xtrace waiting for pod/some-name-mongos-0 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod some-name-mongos-1 + local pod=some-name-mongos-1 + set +o xtrace waiting for pod/some-name-mongos-1 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Ov9fVEKhdB +++ mktemp ++ local LAST_ERR=/tmp/tmp.9OYDDdzDOU ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].arbiter.enabled}' ++ 
exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Ov9fVEKhdB ++ cat /tmp/tmp.9OYDDdzDOU ++ rm /tmp/tmp.Ov9fVEKhdB /tmp/tmp.9OYDDdzDOU ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-mongos-2 + local pod=some-name-mongos-2 + set +o xtrace waiting for pod/some-name-mongos-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.xVDOFIkspl +++ mktemp ++ local LAST_ERR=/tmp/tmp.CPRKsf38cz ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.xVDOFIkspl ++ cat /tmp/tmp.CPRKsf38cz ++ rm /tmp/tmp.xVDOFIkspl /tmp/tmp.CPRKsf38cz ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.cIvlnJlgYd +++ mktemp ++ local LAST_ERR=/tmp/tmp.BE1Op8skRe ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.cIvlnJlgYd ++ cat /tmp/tmp.BE1Op8skRe ++ rm /tmp/tmp.cIvlnJlgYd /tmp/tmp.BE1Op8skRe ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness + sleep 10 + compare_mongos_cmd find myApp:myPass@some-name-mongos.pitr-physical-23919 -2nd + local command=find + local uri=myApp:myPass@some-name-mongos.pitr-physical-23919 + local postfix=-2nd + local suffix= + local database=myApp + local collection=test + local port=27017 + local tls=false + [[ false == \t\r\u\e ]] + mongos_command=run_mongos + run_mongos 'use myApp\n db.test.find()' myApp:myPass@some-name-mongos.pitr-physical-23919 mongodb '' '' 27017 + egrep -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-mongos.pitr-physical-23919 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local port=27017 + local mongo_bin=mongo + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ echo .svc.cluster.local egrep: warning: egrep is obsolescent; using grep -E ++ awk -F: '{print $2}' + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.uhtqfiYKmZ +++ mktemp ++ local LAST_ERR=/tmp/tmp.t8CDrhjire ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.uhtqfiYKmZ ++ cat /tmp/tmp.t8CDrhjire ++ rm /tmp/tmp.uhtqfiYKmZ /tmp/tmp.t8CDrhjire ++ return 0 + local client_container=psmdb-client-696897d69b-7zv7m + kubectl_bin exec psmdb-client-696897d69b-7zv7m -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.pitr-physical-23919.svc.cluster.local:27017/admin ' ++ 
mktemp + local LAST_OUT=/tmp/tmp.oWF7dlbDOe ++ mktemp + local LAST_ERR=/tmp/tmp.iJT6w1lIXO + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-7zv7m -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.pitr-physical-23919.svc.cluster.local:27017/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.oWF7dlbDOe + cat /tmp/tmp.iJT6w1lIXO + rm /tmp/tmp.oWF7dlbDOe /tmp/tmp.iJT6w1lIXO + return 0 + [[ 0 -eq 0 ]] + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/pitr-physical/compare/find-2nd.json /tmp/tmp.WRgTj3fQjY/find-2nd + run_backup backup-minio 3 physical + local name=backup-minio + local idx=3 + local type=physical + desc 'run backup backup-minio-3' + set +o xtrace ----------------------------------------------------------------------------------- run backup backup-minio-3 ----------------------------------------------------------------------------------- + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/pitr-physical/conf/backup-minio.yml + /usr/sbin/sed -e 's/name:/name: backup-minio-3/' + /usr/sbin/sed -e 's/type:/type: physical/' + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.9CEWvQmOfI ++ mktemp + local LAST_ERR=/tmp/tmp.lwAwlgN2fY + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.9CEWvQmOfI perconaservermongodbbackup.psmdb.percona.com/backup-minio-3 created + cat /tmp/tmp.lwAwlgN2fY + rm /tmp/tmp.9CEWvQmOfI /tmp/tmp.lwAwlgN2fY + return 0 + wait_backup backup-minio-3 + local backup_name=backup-minio-3 + local target_state=ready + set +o xtrace waiting for backup-minio-3 to reach ready state...........OK + sleep 5 + compare_latest_restorable_time some-name-rs0 backup-minio-3 + local cluster=some-name-rs0 + local backup_name=backup-minio-3 + local latest_restorable_time + local backup_time ++ get_latest_restorable_time some-name-rs0 ++ local cluster=some-name-rs0 ++ local first_timestamp ++ local second_timestamp ++ local retry=0 ++ [[ '' != '' ]] +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.qDnBZ2QZol ++++ mktemp +++ local LAST_ERR=/tmp/tmp.5EIv2cbSJ0 +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.qDnBZ2QZol +++ cat /tmp/tmp.5EIv2cbSJ0 +++ rm /tmp/tmp.qDnBZ2QZol /tmp/tmp.5EIv2cbSJ0 +++ return 0 ++ first_timestamp=1764881241 ++ sleep 5 ++ [[ 1764881241 != '' ]] ++ [[ 1764881241 != \n\u\l\l ]] +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.s8akPiwr2E ++++ mktemp +++ local LAST_ERR=/tmp/tmp.DsNFWVAVje +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.s8akPiwr2E +++ cat /tmp/tmp.DsNFWVAVje +++ rm /tmp/tmp.s8akPiwr2E /tmp/tmp.DsNFWVAVje +++ return 0 ++ second_timestamp=1764881241 ++ let retry+=1 ++ [[ 1 -gt 30 ]] ++ [[ 
1764881241 != '' ]] ++ [[ 1764881241 != \n\u\l\l ]] ++ [[ 1764881241 == 1764881241 ]] ++ /usr/sbin/date -u -d @1764881241 +%Y-%m-%dT%H:%M:%SZ + latest_restorable_time=2025-12-04T20:47:21Z ++ get_latest_restorable_time_from_backup_object backup-minio-3 ++ local backup_name=backup-minio-3 ++ local latestRestorableTime ++ local retry=0 ++ [[ '' != '' ]] ++ sleep 5 +++ kubectl_bin get psmdb-backup backup-minio-3 -o 'jsonpath={.status.latestRestorableTime}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.8R7pxHWAcu ++++ mktemp +++ local LAST_ERR=/tmp/tmp.ROLZ6JY60f +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get psmdb-backup backup-minio-3 -o 'jsonpath={.status.latestRestorableTime}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.8R7pxHWAcu +++ cat /tmp/tmp.ROLZ6JY60f +++ rm /tmp/tmp.8R7pxHWAcu /tmp/tmp.ROLZ6JY60f +++ return 0 ++ latestRestorableTime=2025-12-04T20:47:21Z ++ let retry+=1 ++ [[ 1 -gt 30 ]] ++ [[ 2025-12-04T20:47:21Z != '' ]] ++ [[ 2025-12-04T20:47:21Z != \n\u\l\l ]] ++ echo 2025-12-04T20:47:21Z + backup_time=2025-12-04T20:47:21Z + [[ 2025-12-04T20:47:21Z != \2\0\2\5\-\1\2\-\0\4\T\2\0\:\4\7\:\2\1\Z ]] + check_recovery backup-minio-3 latest '' -3rd some-name + local backup_name=backup-minio-3 + local restore_type=latest + local restore_date= + local cmp_postfix=-3rd + local cluster_name=some-name + local backupSource= ++ get_latest_oplog_chunk_ts some-name ++ local cluster=some-name +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.955hbgJiUh ++++ mktemp +++ local LAST_ERR=/tmp/tmp.3hZc2u6JNu +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.955hbgJiUh +++ cat /tmp/tmp.3hZc2u6JNu +++ rm /tmp/tmp.955hbgJiUh /tmp/tmp.3hZc2u6JNu +++ return 0 ++ echo 1764881241 + local latest_ts=1764881241 + desc 'write more data before restore by latest' + set +o xtrace ----------------------------------------------------------------------------------- write more data before restore by latest ----------------------------------------------------------------------------------- + run_mongos 'use myApp\n db.test.insert({ x: 100501 })' myApp:myPass@some-name-mongos.pitr-physical-23919 + local 'command=use myApp\n db.test.insert({ x: 100501 })' + local uri=myApp:myPass@some-name-mongos.pitr-physical-23919 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local port=27017 + local mongo_bin=mongo ++ echo .svc.cluster.local ++ awk -F: '{print $2}' + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.6MnwsuLh7u +++ mktemp ++ local LAST_ERR=/tmp/tmp.AvJtPd1bVA ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.6MnwsuLh7u ++ cat /tmp/tmp.AvJtPd1bVA ++ rm /tmp/tmp.6MnwsuLh7u /tmp/tmp.AvJtPd1bVA ++ return 0 + local client_container=psmdb-client-696897d69b-7zv7m + kubectl_bin exec psmdb-client-696897d69b-7zv7m -- bash -c 'printf '\''use myApp\n 
db.test.insert({ x: 100501 })\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.pitr-physical-23919.svc.cluster.local:27017/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.fojbm80HuD ++ mktemp + local LAST_ERR=/tmp/tmp.ikPpasSKYz + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-7zv7m -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100501 })\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.pitr-physical-23919.svc.cluster.local:27017/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.fojbm80HuD Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-mongos.pitr-physical-23919.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb Implicit session: session { "id" : UUID("b1d80bce-43b8-4b25-9e15-ed2ee9325dd3") } Percona Server for MongoDB server version: v8.0.16-5 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.ikPpasSKYz + rm /tmp/tmp.fojbm80HuD /tmp/tmp.ikPpasSKYz + return 0 + [[ -n '' ]] + desc 'Restoring to latest' + set +o xtrace ----------------------------------------------------------------------------------- Restoring to latest ----------------------------------------------------------------------------------- ++ get_latest_oplog_chunk_ts some-name ++ local cluster=some-name +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.FXbLM50FCe ++++ mktemp +++ local LAST_ERR=/tmp/tmp.RCAwQgfc6o +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.FXbLM50FCe +++ cat /tmp/tmp.RCAwQgfc6o +++ rm /tmp/tmp.FXbLM50FCe /tmp/tmp.RCAwQgfc6o +++ return 0 ++ echo 1764881241 + local current_ts=1764881241 + retries=0 + [[ 1764881241 -gt 1764881241 ]] + [[ 0 -gt 30 ]] ++ get_latest_oplog_chunk_ts some-name ++ local cluster=some-name +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.MziRgwCkWC ++++ mktemp +++ local LAST_ERR=/tmp/tmp.i9WxqlHond +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.MziRgwCkWC +++ cat /tmp/tmp.i9WxqlHond +++ rm /tmp/tmp.MziRgwCkWC /tmp/tmp.i9WxqlHond +++ return 0 ++ echo 1764881241 + latest_ts=1764881241 + retries=1 ++ format_date 1764881241 ++ local timestamp=1764881241 +++ TZ=UTC +++ /usr/sbin/date -d@1764881241 '+%Y-%m-%d %H:%M:%S' ++ echo 2025-12-04 20:47:21 ++ format_date 1764881241 ++ local timestamp=1764881241 +++ TZ=UTC +++ /usr/sbin/date -d@1764881241 '+%Y-%m-%d %H:%M:%S' ++ echo 2025-12-04 20:47:21 + echo 'Waiting for last oplog chunk (2025-12-04 20:47:21) to be 120 seconds older than starting chunk (2025-12-04 20:47:21)' Waiting for last oplog chunk (2025-12-04 20:47:21) to be 120 seconds older than starting chunk (2025-12-04 20:47:21) + sleep 10 + [[ 1764881241 -gt 1764881241 ]] + [[ 1 -gt 30 ]] ++ get_latest_oplog_chunk_ts some-name ++ local cluster=some-name +++ jq '.backups.pitrChunks.pitrChunks | 
last | .range.end' +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json ++++ mktemp +++ local LAST_OUT=/tmp/tmp.kju53heNv3 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.a091N0ziFA +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.kju53heNv3 +++ cat /tmp/tmp.a091N0ziFA +++ rm /tmp/tmp.kju53heNv3 /tmp/tmp.a091N0ziFA +++ return 0 ++ echo 1764881241 + latest_ts=1764881241 + retries=2 ++ format_date 1764881241 ++ local timestamp=1764881241 +++ TZ=UTC +++ /usr/sbin/date -d@1764881241 '+%Y-%m-%d %H:%M:%S' ++ echo 2025-12-04 20:47:21 ++ format_date 1764881241 ++ local timestamp=1764881241 +++ TZ=UTC +++ /usr/sbin/date -d@1764881241 '+%Y-%m-%d %H:%M:%S' ++ echo 2025-12-04 20:47:21 + echo 'Waiting for last oplog chunk (2025-12-04 20:47:21) to be 120 seconds older than starting chunk (2025-12-04 20:47:21)' Waiting for last oplog chunk (2025-12-04 20:47:21) to be 120 seconds older than starting chunk (2025-12-04 20:47:21) + sleep 10 + [[ 1764881241 -gt 1764881241 ]] + [[ 2 -gt 30 ]] ++ get_latest_oplog_chunk_ts some-name ++ local cluster=some-name +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.0ETei6RUBw ++++ mktemp +++ local LAST_ERR=/tmp/tmp.27HtoW0IjI +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.0ETei6RUBw +++ cat /tmp/tmp.27HtoW0IjI +++ rm /tmp/tmp.0ETei6RUBw /tmp/tmp.27HtoW0IjI +++ return 0 ++ echo 1764881241 + latest_ts=1764881241 + retries=3 ++ format_date 1764881241 ++ local timestamp=1764881241 +++ TZ=UTC +++ /usr/sbin/date -d@1764881241 '+%Y-%m-%d %H:%M:%S' ++ echo 2025-12-04 20:47:21 ++ format_date 1764881241 ++ local timestamp=1764881241 +++ TZ=UTC +++ /usr/sbin/date -d@1764881241 '+%Y-%m-%d %H:%M:%S' ++ echo 2025-12-04 20:47:21 + echo 'Waiting for last oplog chunk (2025-12-04 20:47:21) to be 120 seconds older than starting chunk (2025-12-04 20:47:21)' Waiting for last oplog chunk (2025-12-04 20:47:21) to be 120 seconds older than starting chunk (2025-12-04 20:47:21) + sleep 10 + [[ 1764881241 -gt 1764881241 ]] + [[ 3 -gt 30 ]] ++ get_latest_oplog_chunk_ts some-name ++ local cluster=some-name +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.9kxvNFXjkI ++++ mktemp +++ local LAST_ERR=/tmp/tmp.jhYPOYw0Wo +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.9kxvNFXjkI +++ cat /tmp/tmp.jhYPOYw0Wo +++ rm /tmp/tmp.9kxvNFXjkI /tmp/tmp.jhYPOYw0Wo +++ return 0 ++ echo 1764881241 + latest_ts=1764881241 + retries=4 ++ format_date 1764881241 ++ local timestamp=1764881241 +++ TZ=UTC +++ /usr/sbin/date -d@1764881241 '+%Y-%m-%d %H:%M:%S' ++ echo 2025-12-04 20:47:21 ++ format_date 1764881241 ++ local timestamp=1764881241 +++ TZ=UTC +++ /usr/sbin/date -d@1764881241 '+%Y-%m-%d %H:%M:%S' ++ echo 2025-12-04 20:47:21 + echo 'Waiting for last oplog chunk 
(2025-12-04 20:47:21) to be 120 seconds older than starting chunk (2025-12-04 20:47:21)' Waiting for last oplog chunk (2025-12-04 20:47:21) to be 120 seconds older than starting chunk (2025-12-04 20:47:21) + sleep 10 + [[ 1764881241 -gt 1764881241 ]] + [[ 4 -gt 30 ]] ++ get_latest_oplog_chunk_ts some-name ++ local cluster=some-name +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.KtGRtmZ1rJ ++++ mktemp +++ local LAST_ERR=/tmp/tmp.TRz1tUAgZQ +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.KtGRtmZ1rJ +++ cat /tmp/tmp.TRz1tUAgZQ +++ rm /tmp/tmp.KtGRtmZ1rJ /tmp/tmp.TRz1tUAgZQ +++ return 0 ++ echo 1764881241 + latest_ts=1764881241 + retries=5 ++ format_date 1764881241 ++ local timestamp=1764881241 +++ TZ=UTC +++ /usr/sbin/date -d@1764881241 '+%Y-%m-%d %H:%M:%S' ++ echo 2025-12-04 20:47:21 ++ format_date 1764881241 ++ local timestamp=1764881241 +++ TZ=UTC +++ /usr/sbin/date -d@1764881241 '+%Y-%m-%d %H:%M:%S' ++ echo 2025-12-04 20:47:21 + echo 'Waiting for last oplog chunk (2025-12-04 20:47:21) to be 120 seconds older than starting chunk (2025-12-04 20:47:21)' Waiting for last oplog chunk (2025-12-04 20:47:21) to be 120 seconds older than starting chunk (2025-12-04 20:47:21) + sleep 10 + [[ 1764881241 -gt 1764881241 ]] + [[ 5 -gt 30 ]] ++ get_latest_oplog_chunk_ts some-name ++ local cluster=some-name +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.wrQfOIyOv3 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.M6PWH3Bev2 +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.wrQfOIyOv3 +++ cat /tmp/tmp.M6PWH3Bev2 +++ rm /tmp/tmp.wrQfOIyOv3 /tmp/tmp.M6PWH3Bev2 +++ return 0 ++ echo 1764881241 + latest_ts=1764881241 + retries=6 ++ format_date 1764881241 ++ local timestamp=1764881241 +++ TZ=UTC +++ /usr/sbin/date -d@1764881241 '+%Y-%m-%d %H:%M:%S' ++ echo 2025-12-04 20:47:21 ++ format_date 1764881241 ++ local timestamp=1764881241 +++ TZ=UTC +++ /usr/sbin/date -d@1764881241 '+%Y-%m-%d %H:%M:%S' ++ echo 2025-12-04 20:47:21 + echo 'Waiting for last oplog chunk (2025-12-04 20:47:21) to be 120 seconds older than starting chunk (2025-12-04 20:47:21)' Waiting for last oplog chunk (2025-12-04 20:47:21) to be 120 seconds older than starting chunk (2025-12-04 20:47:21) + sleep 10 + [[ 1764881241 -gt 1764881241 ]] + [[ 6 -gt 30 ]] ++ get_latest_oplog_chunk_ts some-name ++ local cluster=some-name +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.DjytT60bez ++++ mktemp +++ local LAST_ERR=/tmp/tmp.WQadVOofmG +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.DjytT60bez +++ cat /tmp/tmp.WQadVOofmG +++ rm /tmp/tmp.DjytT60bez /tmp/tmp.WQadVOofmG +++ return 0 ++ echo 1764881241 + 
latest_ts=1764881241 + retries=7 ++ format_date 1764881241 ++ local timestamp=1764881241 +++ TZ=UTC +++ /usr/sbin/date -d@1764881241 '+%Y-%m-%d %H:%M:%S' ++ echo 2025-12-04 20:47:21 ++ format_date 1764881241 ++ local timestamp=1764881241 +++ TZ=UTC +++ /usr/sbin/date -d@1764881241 '+%Y-%m-%d %H:%M:%S' ++ echo 2025-12-04 20:47:21 + echo 'Waiting for last oplog chunk (2025-12-04 20:47:21) to be 120 seconds older than starting chunk (2025-12-04 20:47:21)' Waiting for last oplog chunk (2025-12-04 20:47:21) to be 120 seconds older than starting chunk (2025-12-04 20:47:21) + sleep 10 + [[ 1764881241 -gt 1764881241 ]] + [[ 7 -gt 30 ]] ++ get_latest_oplog_chunk_ts some-name ++ local cluster=some-name +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.nhWg9o67MK ++++ mktemp +++ local LAST_ERR=/tmp/tmp.OPxdtEo1q1 +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.nhWg9o67MK +++ cat /tmp/tmp.OPxdtEo1q1 +++ rm /tmp/tmp.nhWg9o67MK /tmp/tmp.OPxdtEo1q1 +++ return 0 ++ echo 1764881241 + latest_ts=1764881241 + retries=8 ++ format_date 1764881241 ++ local timestamp=1764881241 +++ TZ=UTC +++ /usr/sbin/date -d@1764881241 '+%Y-%m-%d %H:%M:%S' ++ echo 2025-12-04 20:47:21 ++ format_date 1764881241 ++ local timestamp=1764881241 +++ TZ=UTC +++ /usr/sbin/date -d@1764881241 '+%Y-%m-%d %H:%M:%S' ++ echo 2025-12-04 20:47:21 + echo 'Waiting for last oplog chunk (2025-12-04 20:47:21) to be 120 seconds older than starting chunk (2025-12-04 20:47:21)' Waiting for last oplog chunk (2025-12-04 20:47:21) to be 120 seconds older than starting chunk (2025-12-04 20:47:21) + sleep 10 + [[ 1764881241 -gt 1764881241 ]] + [[ 8 -gt 30 ]] ++ get_latest_oplog_chunk_ts some-name ++ local cluster=some-name +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.jcdGQSna81 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.HQq9glnd90 +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.jcdGQSna81 +++ cat /tmp/tmp.HQq9glnd90 +++ rm /tmp/tmp.jcdGQSna81 /tmp/tmp.HQq9glnd90 +++ return 0 ++ echo 1764881241 + latest_ts=1764881241 + retries=9 ++ format_date 1764881241 ++ local timestamp=1764881241 +++ TZ=UTC +++ /usr/sbin/date -d@1764881241 '+%Y-%m-%d %H:%M:%S' ++ echo 2025-12-04 20:47:21 ++ format_date 1764881241 ++ local timestamp=1764881241 +++ TZ=UTC +++ /usr/sbin/date -d@1764881241 '+%Y-%m-%d %H:%M:%S' ++ echo 2025-12-04 20:47:21 + echo 'Waiting for last oplog chunk (2025-12-04 20:47:21) to be 120 seconds older than starting chunk (2025-12-04 20:47:21)' Waiting for last oplog chunk (2025-12-04 20:47:21) to be 120 seconds older than starting chunk (2025-12-04 20:47:21) + sleep 10 + [[ 1764881241 -gt 1764881241 ]] + [[ 9 -gt 30 ]] ++ get_latest_oplog_chunk_ts some-name ++ local cluster=some-name +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.dwRC92EqnP ++++ mktemp +++ local LAST_ERR=/tmp/tmp.XuZCRDNUFg 
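The polling traced above and below keeps asking PBM on the first rs0 member for the newest PITR oplog chunk and pretty-prints the Unix timestamps for the progress message. A condensed sketch of what the traced helpers appear to do, with the retrying kubectl_bin wrapper left out and any variable not visible in the trace (such as start_ts) being an assumption:

get_latest_oplog_chunk_ts() {
    local cluster=$1
    # Ask the backup agent for PBM status and take the end of the newest PITR chunk
    kubectl exec ${cluster}-rs0-0 -c backup-agent -- pbm status -o json \
        | jq '.backups.pitrChunks.pitrChunks | last | .range.end'
}

format_date() {
    local timestamp=$1
    # Render an epoch timestamp as UTC, e.g. 1764881241 -> 2025-12-04 20:47:21
    TZ=UTC date -d@${timestamp} '+%Y-%m-%d %H:%M:%S'
}

# The surrounding loop: poll every 10 seconds, give up after 30 retries,
# and stop once the newest chunk timestamp moves past the starting one.
start_ts=${start_ts:-1764881241}   # chunk end recorded before the wait began (value from this run)
retries=0
latest_ts=$(get_latest_oplog_chunk_ts some-name)
while [[ ${latest_ts} -le ${start_ts} ]]; do
    if [[ ${retries} -gt 30 ]]; then
        echo "Last oplog chunk never advanced past $(format_date ${start_ts})"
        exit 1
    fi
    echo "Waiting for last oplog chunk ($(format_date ${latest_ts})) to advance past the starting chunk ($(format_date ${start_ts}))"
    sleep 10
    latest_ts=$(get_latest_oplog_chunk_ts some-name)
    retries=$((retries + 1))
done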
+++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.dwRC92EqnP +++ cat /tmp/tmp.XuZCRDNUFg +++ rm /tmp/tmp.dwRC92EqnP /tmp/tmp.XuZCRDNUFg +++ return 0 ++ echo 1764882546 + latest_ts=1764882546 + retries=10 ++ format_date 1764882546 ++ local timestamp=1764882546 +++ TZ=UTC +++ /usr/sbin/date -d@1764882546 '+%Y-%m-%d %H:%M:%S' ++ echo 2025-12-04 21:09:06 ++ format_date 1764881241 ++ local timestamp=1764881241 +++ TZ=UTC +++ /usr/sbin/date -d@1764881241 '+%Y-%m-%d %H:%M:%S' ++ echo 2025-12-04 20:47:21 + echo 'Waiting for last oplog chunk (2025-12-04 21:09:06) to be 120 seconds older than starting chunk (2025-12-04 20:47:21)' Waiting for last oplog chunk (2025-12-04 21:09:06) to be 120 seconds older than starting chunk (2025-12-04 20:47:21) + sleep 10 + [[ 1764882546 -gt 1764881241 ]] + '[' -z '' ']' + desc 'check restore by latest' + set +o xtrace ----------------------------------------------------------------------------------- check restore by latest ----------------------------------------------------------------------------------- + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/pitr-physical/conf/restore.yml + /usr/sbin/sed -e 's/name:/name: restore-backup-minio-3/' + /usr/sbin/sed -e 's/backupName:/backupName: backup-minio-3/' + /usr/sbin/sed -e /backupSource/,+8d + '[' -z '' ']' + /usr/sbin/sed -e 's/pitrType:/type: latest/' + /usr/sbin/sed -e /date:/d + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.TzyA5eJqP6 ++ mktemp + local LAST_ERR=/tmp/tmp.7IiuiON8zV + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.TzyA5eJqP6 perconaservermongodbrestore.psmdb.percona.com/restore-backup-minio-3 created + cat /tmp/tmp.7IiuiON8zV + rm /tmp/tmp.TzyA5eJqP6 /tmp/tmp.7IiuiON8zV + return 0 + wait_restore backup-minio-3 some-name requested 0 1200 + local backup_name=backup-minio-3 + local cluster_name=some-name + local target_state=requested + local wait_cluster_consistency=0 + local wait_time=1200 + local ok_if_ready=0 + set +o xtrace Waiting for the psmdb-restore/restore-backup-minio-3 object to be created.OK Waiting psmdb-restore/restore-backup-minio-3 to reach state "requested" ..............OK after 13 minutes + [[ 0 -eq 1 ]] + echo + wait_restore backup-minio-3 some-name ready 0 1600 + local backup_name=backup-minio-3 + local cluster_name=some-name + local target_state=ready + local wait_cluster_consistency=0 + local wait_time=1600 + local ok_if_ready=0 + set +o xtrace Waiting for the psmdb-restore/restore-backup-minio-3 object to be created.OK Waiting psmdb-restore/restore-backup-minio-3 to reach state "ready" ...OK after 2 minutes + [[ 0 -eq 1 ]] + echo + set -o xtrace + wait_for_running some-name-rs0 3 + local name=some-name-rs0 + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=some-name ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod some-name-rs0-0 + local pod=some-name-rs0-0 + set +o xtrace waiting for pod/some-name-rs0-0 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod some-name-rs0-1 + local pod=some-name-rs0-1 + set +o xtrace waiting for pod/some-name-rs0-1 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 2 
-eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.xD9BnLFiHR +++ mktemp ++ local LAST_ERR=/tmp/tmp.Ny6Y4NVHxP ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.xD9BnLFiHR ++ cat /tmp/tmp.Ny6Y4NVHxP ++ rm /tmp/tmp.xD9BnLFiHR /tmp/tmp.Ny6Y4NVHxP ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-rs0-2 + local pod=some-name-rs0-2 + set +o xtrace waiting for pod/some-name-rs0-2 to be ready..............OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.o7NUpnXP3O +++ mktemp ++ local LAST_ERR=/tmp/tmp.SA1imdhQSu ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.o7NUpnXP3O ++ cat /tmp/tmp.SA1imdhQSu ++ rm /tmp/tmp.o7NUpnXP3O /tmp/tmp.SA1imdhQSu ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.n7kaGYWJcO +++ mktemp ++ local LAST_ERR=/tmp/tmp.KBF286BfGy ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.n7kaGYWJcO ++ cat /tmp/tmp.KBF286BfGy ++ rm /tmp/tmp.n7kaGYWJcO /tmp/tmp.KBF286BfGy ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness.... 
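The restore exercised above ('check restore by latest') is produced by sed-editing e2e-tests/pitr-physical/conf/restore.yml: the name and backupName get filled in, the backupSource block and the date: line are stripped, and pitrType: becomes type: latest. The applied object therefore looks roughly like the manifest below; this is a sketch following the usual PerconaServerMongoDBRestore fields, not a copy of the repo's template, which may carry additional fields:

kubectl apply -f - <<'EOF'
apiVersion: psmdb.percona.com/v1
kind: PerconaServerMongoDBRestore
metadata:
  name: restore-backup-minio-3
spec:
  clusterName: some-name
  backupName: backup-minio-3
  pitr:
    type: latest   # replay the oplog up to the newest available chunk, so no date: is given
EOF

wait_restore then watches the psmdb-restore object until it reaches the requested state and, in the second call, the ready state, which is what the 13-minute and 2-minute waits above correspond to; after that, wait_for_running re-checks the rs0 pods above and the cfg and mongos pods below.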
+ wait_for_running some-name-cfg 3 + local name=some-name-cfg + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=cfg + local cluster_name=some-name ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod some-name-cfg-0 + local pod=some-name-cfg-0 + set +o xtrace waiting for pod/some-name-cfg-0 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod some-name-cfg-1 + local pod=some-name-cfg-1 + set +o xtrace waiting for pod/some-name-cfg-1 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.pHbynUOJmV +++ mktemp ++ local LAST_ERR=/tmp/tmp.Kz6F7PiFnF ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.pHbynUOJmV ++ cat /tmp/tmp.Kz6F7PiFnF ++ rm /tmp/tmp.pHbynUOJmV /tmp/tmp.Kz6F7PiFnF ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-cfg-2 + local pod=some-name-cfg-2 + set +o xtrace waiting for pod/some-name-cfg-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.uDzdbcrgzQ +++ mktemp ++ local LAST_ERR=/tmp/tmp.3aypbyVCvh ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.uDzdbcrgzQ ++ cat /tmp/tmp.3aypbyVCvh ++ rm /tmp/tmp.uDzdbcrgzQ /tmp/tmp.3aypbyVCvh ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.coK8aDDMEN +++ mktemp ++ local LAST_ERR=/tmp/tmp.rPyCqYpvgb ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.coK8aDDMEN ++ cat /tmp/tmp.rPyCqYpvgb ++ rm /tmp/tmp.coK8aDDMEN /tmp/tmp.rPyCqYpvgb ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness + wait_for_running some-name-mongos 3 + local name=some-name-mongos + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=mongos + local cluster_name=some-name ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod some-name-mongos-0 + local pod=some-name-mongos-0 + set +o xtrace waiting for pod/some-name-mongos-0 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod some-name-mongos-1 + local pod=some-name-mongos-1 + set +o xtrace waiting for pod/some-name-mongos-1 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.v5tHcLhCK9 +++ mktemp ++ local LAST_ERR=/tmp/tmp.E26I1e9ugL ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].arbiter.enabled}' ++ 
exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.v5tHcLhCK9 ++ cat /tmp/tmp.E26I1e9ugL ++ rm /tmp/tmp.v5tHcLhCK9 /tmp/tmp.E26I1e9ugL ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-mongos-2 + local pod=some-name-mongos-2 + set +o xtrace waiting for pod/some-name-mongos-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.2lOVVs6hCn +++ mktemp ++ local LAST_ERR=/tmp/tmp.htmAvRUxai ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.2lOVVs6hCn ++ cat /tmp/tmp.htmAvRUxai ++ rm /tmp/tmp.2lOVVs6hCn /tmp/tmp.htmAvRUxai ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.9qqH5VYXec +++ mktemp ++ local LAST_ERR=/tmp/tmp.neoJNg2kLm ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.9qqH5VYXec ++ cat /tmp/tmp.neoJNg2kLm ++ rm /tmp/tmp.9qqH5VYXec /tmp/tmp.neoJNg2kLm ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness + sleep 10 + compare_mongos_cmd find myApp:myPass@some-name-mongos.pitr-physical-23919 -3rd + local command=find + local uri=myApp:myPass@some-name-mongos.pitr-physical-23919 + local postfix=-3rd + local suffix= + local database=myApp + local collection=test + local port=27017 + local tls=false + [[ false == \t\r\u\e ]] + mongos_command=run_mongos + run_mongos 'use myApp\n db.test.find()' myApp:myPass@some-name-mongos.pitr-physical-23919 mongodb '' '' 27017 + egrep -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-mongos.pitr-physical-23919 + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local port=27017 + local mongo_bin=mongo ++ echo .svc.cluster.local ++ awk -F: '{print $2}' egrep: warning: egrep is obsolescent; using grep -E + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.0NfZMd6tZQ +++ mktemp ++ local LAST_ERR=/tmp/tmp.Io7vOdfDbM ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.0NfZMd6tZQ ++ cat /tmp/tmp.Io7vOdfDbM ++ rm /tmp/tmp.0NfZMd6tZQ /tmp/tmp.Io7vOdfDbM ++ return 0 + local client_container=psmdb-client-696897d69b-7zv7m + kubectl_bin exec psmdb-client-696897d69b-7zv7m -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.pitr-physical-23919.svc.cluster.local:27017/admin ' ++ 
mktemp
+ local LAST_OUT=/tmp/tmp.qp0heZMFWn
++ mktemp
+ local LAST_ERR=/tmp/tmp.t4TxD2Xr5s
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ kubectl exec psmdb-client-696897d69b-7zv7m -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.pitr-physical-23919.svc.cluster.local:27017/admin '
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.qp0heZMFWn
+ cat /tmp/tmp.t4TxD2Xr5s
+ rm /tmp/tmp.qp0heZMFWn /tmp/tmp.t4TxD2Xr5s
+ return 0
+ [[ 0 -eq 0 ]]
+ diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/pitr-physical/compare/find-3rd.json /tmp/tmp.WRgTj3fQjY/find-3rd
+ destroy pitr-physical-23919
+ local namespace=pitr-physical-23919
+ local ignore_logs=true
+ [[ 0 == 1 ]]
+ desc 'destroy cluster/operator and all other resources'
+ set +o xtrace
-----------------------------------------------------------------------------------
destroy cluster/operator and all other resources
-----------------------------------------------------------------------------------
+ '[' true == false ']'
+ delete_backups
+ desc 'Delete psmdb-backup'
+ set +o xtrace
-----------------------------------------------------------------------------------
Delete psmdb-backup
-----------------------------------------------------------------------------------
++ kubectl_bin get psmdb-backup --no-headers
++ wc -l
+++ mktemp
++ local LAST_OUT=/tmp/tmp.RWPJsxUlbS
+++ mktemp
++ local LAST_ERR=/tmp/tmp.PeCpRTSlgI
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in $(seq 0 2)
++ set +e
++ kubectl get psmdb-backup --no-headers
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 -a -n 1 ']'
++ break
++ cat /tmp/tmp.RWPJsxUlbS
++ cat /tmp/tmp.PeCpRTSlgI
++ rm /tmp/tmp.RWPJsxUlbS /tmp/tmp.PeCpRTSlgI
++ return 0
+ '[' 2 '!=' 0 ']'
+ kubectl_bin get psmdb-backup
++ mktemp
+ local LAST_OUT=/tmp/tmp.vvnJ25EjFZ
++ mktemp
+ local LAST_ERR=/tmp/tmp.DL1wItSECA
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ kubectl get psmdb-backup
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.vvnJ25EjFZ
NAME             CLUSTER     STORAGE   DESTINATION                                   TYPE       SIZE     STATUS   COMPLETED   AGE
backup-minio-2   some-name   minio     s3://operator-testing/2025-12-04T20:39:21Z    physical   5.59MB   ready    49m         50m
backup-minio-3   some-name   minio     s3://operator-testing/2025-12-04T21:06:42Z    physical   3.52MB   ready    22m         22m
+ cat /tmp/tmp.DL1wItSECA
+ rm /tmp/tmp.vvnJ25EjFZ /tmp/tmp.DL1wItSECA
+ return 0
+ kubectl_bin delete psmdb-backup --all
++ mktemp
+ local LAST_OUT=/tmp/tmp.yWvIW1aXeN
++ mktemp
+ local LAST_ERR=/tmp/tmp.tRCDbRzK83
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ kubectl delete psmdb-backup --all
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.yWvIW1aXeN
perconaservermongodbbackup.psmdb.percona.com "backup-minio-2" deleted from pitr-physical-23919 namespace
perconaservermongodbbackup.psmdb.percona.com "backup-minio-3" deleted from pitr-physical-23919 namespace
+ cat /tmp/tmp.tRCDbRzK83
+ rm /tmp/tmp.yWvIW1aXeN /tmp/tmp.tRCDbRzK83
+ return 0
+ delete_crd
+ desc 'get and delete old CRDs and RBAC'
+ set +o xtrace
-----------------------------------------------------------------------------------
get and delete old CRDs and RBAC
-----------------------------------------------------------------------------------
+ kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/deploy/crd.yaml --ignore-not-found --wait=false
++ mktemp
+ 
local LAST_OUT=/tmp/tmp.Bt42oQQjAC ++ mktemp + local LAST_ERR=/tmp/tmp.4qTYv3o0Rx + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/deploy/crd.yaml --ignore-not-found --wait=false + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Bt42oQQjAC customresourcedefinition.apiextensions.k8s.io "perconaservermongodbbackups.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbrestores.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbs.psmdb.percona.com" deleted + cat /tmp/tmp.4qTYv3o0Rx + rm /tmp/tmp.Bt42oQQjAC /tmp/tmp.4qTYv3o0Rx + return 0 ++ yq eval .metadata.name /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/deploy/crd.yaml ++ grep -v '\-\-\-' grep: warning: stray \ before - grep: warning: stray \ before - + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + kubectl get perconaservermongodbbackups.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbbackups.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbbackups" + kubectl patch perconaservermongodbbackups.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbbackups" + : + kubectl_bin wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.NM2ghTQLnG ++ mktemp + local LAST_ERR=/tmp/tmp.ZlVvP7x63r + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.NM2ghTQLnG + cat /tmp/tmp.ZlVvP7x63r + rm /tmp/tmp.NM2ghTQLnG /tmp/tmp.ZlVvP7x63r + return 0 + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + kubectl get perconaservermongodbrestores.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbrestores.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbrestores" + kubectl patch perconaservermongodbrestores.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbrestores" + : + kubectl_bin wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.wUbMjMw2wZ ++ mktemp + local LAST_ERR=/tmp/tmp.31ZMWU8Hz2 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.wUbMjMw2wZ + cat /tmp/tmp.31ZMWU8Hz2 + rm /tmp/tmp.wUbMjMw2wZ /tmp/tmp.31ZMWU8Hz2 + return 0 + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + kubectl get perconaservermongodbs.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbs.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource 
type "perconaservermongodbs" + kubectl patch perconaservermongodbs.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbs" + : + kubectl_bin wait --for=delete crd perconaservermongodbs.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.81uPMRNajy ++ mktemp + local LAST_ERR=/tmp/tmp.WXLkAYsg8Z + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbs.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.81uPMRNajy + cat /tmp/tmp.WXLkAYsg8Z + rm /tmp/tmp.81uPMRNajy /tmp/tmp.WXLkAYsg8Z + return 0 + local rbac_yaml=rbac.yaml + '[' -n psmdb-operator ']' + rbac_yaml=cw-rbac.yaml + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/deploy/cw-rbac.yaml --ignore-not-found ++ mktemp + local LAST_OUT=/tmp/tmp.Jl16GfGnCR ++ mktemp + local LAST_ERR=/tmp/tmp.lwMzSfX1zq + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/deploy/cw-rbac.yaml --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Jl16GfGnCR clusterrole.rbac.authorization.k8s.io "percona-server-mongodb-operator" deleted clusterrolebinding.rbac.authorization.k8s.io "service-account-percona-server-mongodb-operator" deleted + cat /tmp/tmp.lwMzSfX1zq + rm /tmp/tmp.Jl16GfGnCR /tmp/tmp.lwMzSfX1zq + return 0 + destroy_cert_manager + kubectl_bin delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.uILIosfK8v ++ mktemp + local LAST_ERR=/tmp/tmp.tWXTsjY9g2 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.uILIosfK8v + cat /tmp/tmp.tWXTsjY9g2 Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": namespaces "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "challenges.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "orders.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificaterequests.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificates.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "clusterissuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": 
customresourcedefinitions.apiextensions.k8s.io "issuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": serviceaccounts "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": serviceaccounts "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": serviceaccounts "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cluster-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-edit" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io 
"cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server 
(NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": services "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": services "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": services "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": deployments.apps "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": deployments.apps "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": deployments.apps "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": mutatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": validatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found + sleep 0 + for i in $(seq 0 2) + set +e + kubectl delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.uILIosfK8v + cat /tmp/tmp.tWXTsjY9g2 Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": namespaces "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "challenges.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "orders.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificaterequests.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": 
customresourcedefinitions.apiextensions.k8s.io "certificates.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "clusterissuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "issuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": serviceaccounts "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": serviceaccounts "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": serviceaccounts "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cluster-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-edit" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io 
"cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io 
"cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": services "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": services "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": services "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": deployments.apps "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": deployments.apps "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": deployments.apps "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": mutatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": validatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found + sleep 4 + for i in $(seq 0 2) + set +e + kubectl delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.uILIosfK8v + cat /tmp/tmp.tWXTsjY9g2 Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": namespaces "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "challenges.acme.cert-manager.io" not found Error from server (NotFound): error when deleting 
"https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "orders.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificaterequests.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificates.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "clusterissuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "issuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": serviceaccounts "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": serviceaccounts "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": serviceaccounts "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cluster-view" not found Error from server (NotFound): error when deleting 
"https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-edit" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting 
"https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": services "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": services "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": services "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": deployments.apps "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": deployments.apps "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": deployments.apps "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": mutatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": validatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found + sleep 8 + cat /tmp/tmp.uILIosfK8v + cat /tmp/tmp.tWXTsjY9g2 Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": namespaces "cert-manager" not found 
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "challenges.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "orders.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificaterequests.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificates.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "clusterissuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "issuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": serviceaccounts "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": serviceaccounts "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": serviceaccounts "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from 
server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cluster-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-edit" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting 
"https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": services "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": services "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": services "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": deployments.apps "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": deployments.apps "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": deployments.apps "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": mutatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": validatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not 
+ rm /tmp/tmp.uILIosfK8v /tmp/tmp.tWXTsjY9g2
+ return 1
+ true
+ '[' -n '' ']'
+ '[' -n psmdb-operator ']'
+ kubectl_bin delete --grace-period=0 --force=true namespace pitr-physical-23919
+ rm -rf /tmp/tmp.WRgTj3fQjY
+ kubectl_bin delete --grace-period=0 --force=true namespace psmdb-operator
++ mktemp
++ mktemp
+ local LAST_OUT=/tmp/tmp.jOTLCoMG9i
+ desc 'test passed'
+ set +o xtrace
-----------------------------------------------------------------------------------
test passed
-----------------------------------------------------------------------------------
++ mktemp
+ local LAST_OUT=/tmp/tmp.KPbta0b8Dr
++ mktemp
+ local LAST_ERR=/tmp/tmp.ZoalhwFIvi
+ local exit_status=0
+ local timeout=4
+ local LAST_ERR=/tmp/tmp.4YRX6Oht4I
+ local exit_status=0
+ local timeout=4
++ seq 0 2
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ kubectl delete --grace-period=0 --force=true namespace pitr-physical-23919
+ for i in $(seq 0 2)
+ set +e
+ kubectl delete --grace-period=0 --force=true namespace psmdb-operator
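The two force-deletions at the end go through the harness's kubectl_bin retry wrapper, whose trace (mktemp, LAST_OUT/LAST_ERR, exit_status, timeout, seq 0 2) appears interleaved above because both deletions are issued back to back. The following is a simplified stand-in for that wrapper, written only to illustrate the retry pattern visible in the trace and not taken from the harness source; the back-off values are an assumption based on the "timeout=4" and "sleep 8" lines in this log:

    # Simplified stand-in for kubectl_bin: capture stdout/stderr to temp files,
    # retry up to three times, and print the captured output after each attempt.
    kubectl_bin() {
        local out err status i
        out=$(mktemp)
        err=$(mktemp)
        for i in 0 1 2; do
            set +e
            kubectl "$@" >"$out" 2>"$err"
            status=$?
            set -e
            cat "$out" "$err"
            if [ "$status" -eq 0 ]; then
                rm -f "$out" "$err"
                return 0
            fi
            sleep $((4 * i))   # 0s, then 4s, then 8s between attempts
        done
        rm -f "$out" "$err"
        return 1
    }

    # --grace-period=0 --force=true skips graceful pod termination so the test
    # and operator namespaces are removed immediately once the run has passed.
    kubectl_bin delete --grace-period=0 --force=true namespace pitr-physical-23919 || true
    kubectl_bin delete --grace-period=0 --force=true namespace psmdb-operator || true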