Log: /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/logs/pitr.log grep: warning: stray \ before - Warning: version difference between client (1.34) and server (1.31) exceeds the supported minor version skew of +/-1 Warning: version difference between client (1.34) and server (1.31) exceeds the supported minor version skew of +/-1 Warning: version difference between client (1.34) and server (1.31) exceeds the supported minor version skew of +/-1 + main + create_infra pitr-6260 + local ns=pitr-6260 + [[ 1 == 1 ]] + delete_crd + desc 'get and delete old CRDs and RBAC' + set +o xtrace ----------------------------------------------------------------------------------- get and delete old CRDs and RBAC ----------------------------------------------------------------------------------- + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/deploy/crd.yaml --ignore-not-found --wait=false ++ mktemp + local LAST_OUT=/tmp/tmp.AX1MjirKuY ++ mktemp + local LAST_ERR=/tmp/tmp.tcHnJDQSSP + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/deploy/crd.yaml --ignore-not-found --wait=false + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.AX1MjirKuY customresourcedefinition.apiextensions.k8s.io "perconaservermongodbbackups.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbrestores.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbs.psmdb.percona.com" deleted + cat /tmp/tmp.tcHnJDQSSP + rm /tmp/tmp.AX1MjirKuY /tmp/tmp.tcHnJDQSSP + return 0 ++ yq eval .metadata.name /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/deploy/crd.yaml ++ grep -v '\-\-\-' grep: warning: stray \ before - grep: warning: stray \ before - + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + kubectl get perconaservermongodbbackups.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbbackups.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbbackups" + kubectl patch perconaservermongodbbackups.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbbackups" + : + kubectl_bin wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.9l8Da7SxHf ++ mktemp + local LAST_ERR=/tmp/tmp.NEejmwnlgD + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.9l8Da7SxHf + cat /tmp/tmp.NEejmwnlgD + rm /tmp/tmp.9l8Da7SxHf /tmp/tmp.NEejmwnlgD + return 0 + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + kubectl get perconaservermongodbrestores.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbrestores.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbrestores" + kubectl patch perconaservermongodbrestores.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server 
doesn't have a resource type "perconaservermongodbrestores" + : + kubectl_bin wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.xtGThKqcKE ++ mktemp + local LAST_ERR=/tmp/tmp.jnXxzlQfZ7 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.xtGThKqcKE + cat /tmp/tmp.jnXxzlQfZ7 + rm /tmp/tmp.xtGThKqcKE /tmp/tmp.jnXxzlQfZ7 + return 0 + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + kubectl get perconaservermongodbs.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbs.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbs" + kubectl patch perconaservermongodbs.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbs" + : + kubectl_bin wait --for=delete crd perconaservermongodbs.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.5WUuTJHQFX ++ mktemp + local LAST_ERR=/tmp/tmp.wvPB9fnrm0 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbs.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.5WUuTJHQFX + cat /tmp/tmp.wvPB9fnrm0 + rm /tmp/tmp.5WUuTJHQFX /tmp/tmp.wvPB9fnrm0 + return 0 + local rbac_yaml=rbac.yaml + '[' -n psmdb-operator ']' + rbac_yaml=cw-rbac.yaml + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/deploy/cw-rbac.yaml --ignore-not-found ++ mktemp + local LAST_OUT=/tmp/tmp.svVm6nGshX ++ mktemp + local LAST_ERR=/tmp/tmp.2uY73WAqfG + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/deploy/cw-rbac.yaml --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.svVm6nGshX clusterrole.rbac.authorization.k8s.io "percona-server-mongodb-operator" deleted clusterrolebinding.rbac.authorization.k8s.io "service-account-percona-server-mongodb-operator" deleted + cat /tmp/tmp.2uY73WAqfG + rm /tmp/tmp.svVm6nGshX /tmp/tmp.2uY73WAqfG + return 0 + check_crd_for_deletion PR-1961-5b0cc669 + local git_tag=PR-1961-5b0cc669 ++ curl -s https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/PR-1961-5b0cc669/deploy/crd.yaml ++ /usr/sbin/sed s/---//g ++ yq eval .metadata.name ++ /usr/sbin/sed ':a;N;$!ba;s/\n/ /g' + for crd_name in $(curl -s https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/${git_tag}/deploy/crd.yaml | yq eval '.metadata.name' | $sed 's/---//g' | $sed ':a;N;$!ba;s/\n/ /g') ++ kubectl_bin get crd/null -o 'jsonpath={.status.conditions[-1].type}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.79xWmmA3rJ +++ mktemp ++ local LAST_ERR=/tmp/tmp.XPSgMJpeLx ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}' ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.79xWmmA3rJ ++ cat /tmp/tmp.XPSgMJpeLx Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ sleep 0 ++ for i in $(seq 0 2) ++ set +e ++ kubectl 
get crd/null -o 'jsonpath={.status.conditions[-1].type}' ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.79xWmmA3rJ ++ cat /tmp/tmp.XPSgMJpeLx Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ sleep 4 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}' ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.79xWmmA3rJ ++ cat /tmp/tmp.XPSgMJpeLx Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ sleep 8 ++ cat /tmp/tmp.79xWmmA3rJ ++ cat /tmp/tmp.XPSgMJpeLx Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ rm /tmp/tmp.79xWmmA3rJ /tmp/tmp.XPSgMJpeLx ++ return 1 + [[ '' == \T\e\r\m\i\n\a\t\i\n\g ]] + '[' -n psmdb-operator ']' + create_namespace psmdb-operator + local namespace=psmdb-operator + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + destroy_chaos_mesh ++ helm list --all-namespaces --filter chaos-mesh ++ tail -n1 ++ awk '-F ' '{print $2}' ++ sed s/NAMESPACE// + local chaos_mesh_ns= + desc 'destroy chaos-mesh' + set +o xtrace ----------------------------------------------------------------------------------- destroy chaos-mesh ----------------------------------------------------------------------------------- + '[' -n '' ']' ++ kubectl get MutatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete MutatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ awk '{print $1}' ++ grep validate-auth + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl api-resources ++ grep chaos-mesh ++ awk '{print $1}' ++ kubectl get crd ++ grep chaos-mesh.org ++ awk '{print $1}' + timeout 30 kubectl delete crd error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrolebinding ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrolebinding error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrole ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrole error: resource(s) were provided, but no name was specified + : + desc 'cleaned up all old namespaces' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up all old namespaces ----------------------------------------------------------------------------------- + kubectl_bin get ns + egrep -v '^kube-|^default|Terminating|psmdb-operator|openshift|^gke-|^gmp-|^NAME' + awk '{print$1}' + '[' -n '' ']' + desc 'cleaned up old namespaces psmdb-operator' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up old namespaces psmdb-operator ----------------------------------------------------------------------------------- + kubectl_bin delete namespace psmdb-operator --ignore-not-found ++ mktemp + xargs kubectl delete ns ++ mktemp egrep: warning: egrep is obsolescent; using grep -E + local LAST_OUT=/tmp/tmp.KZ9btK5wVV + local LAST_OUT=/tmp/tmp.SNpjKsbtU0 ++ mktemp ++ mktemp + local LAST_ERR=/tmp/tmp.cCVUq0wKTD + local 
exit_status=0 + local timeout=4 ++ seq 0 2 + local LAST_ERR=/tmp/tmp.9aPLGlCjB0 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get ns + for i in $(seq 0 2) + set +e + kubectl delete namespace psmdb-operator --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.KZ9btK5wVV + cat /tmp/tmp.cCVUq0wKTD + rm /tmp/tmp.KZ9btK5wVV /tmp/tmp.cCVUq0wKTD + return 0 namespace "pitr-20941" deleted + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.SNpjKsbtU0 namespace "psmdb-operator" deleted + cat /tmp/tmp.9aPLGlCjB0 + rm /tmp/tmp.SNpjKsbtU0 /tmp/tmp.9aPLGlCjB0 + return 0 + kubectl_bin wait --for=delete namespace psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.SJ5qvp22EO ++ mktemp + local LAST_ERR=/tmp/tmp.VAkSp6t8Jq + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete namespace psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.SJ5qvp22EO + cat /tmp/tmp.VAkSp6t8Jq + rm /tmp/tmp.SJ5qvp22EO /tmp/tmp.VAkSp6t8Jq + return 0 + desc 'create namespace psmdb-operator' + set +o xtrace ----------------------------------------------------------------------------------- create namespace psmdb-operator ----------------------------------------------------------------------------------- + kubectl_bin create namespace psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.qgtzWOyN59 ++ mktemp + local LAST_ERR=/tmp/tmp.8umNnt7hC7 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl create namespace psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.qgtzWOyN59 namespace/psmdb-operator created + cat /tmp/tmp.8umNnt7hC7 + rm /tmp/tmp.qgtzWOyN59 /tmp/tmp.8umNnt7hC7 + return 0 + set_kube_ctx psmdb-operator + local namespace=psmdb-operator ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.dZflrmbKIV +++ mktemp ++ local LAST_ERR=/tmp/tmp.EroLvCvmZr ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.dZflrmbKIV ++ cat /tmp/tmp.EroLvCvmZr ++ rm /tmp/tmp.dZflrmbKIV /tmp/tmp.EroLvCvmZr ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-1961-5b0cc669-2-cluster3 --namespace=psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.Adf1vPjiul ++ mktemp + local LAST_ERR=/tmp/tmp.OIA1hRSnAM + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-1961-5b0cc669-2-cluster3 --namespace=psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Adf1vPjiul Context "gke_cloud-dev-112233_us-central1-a_jen-psmdb-1961-5b0cc669-2-cluster3" modified. 
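

[annotation] Nearly every step above goes through one wrapper, kubectl_bin, which runs kubectl with retries and captures stdout/stderr in mktemp files. A minimal sketch, reconstructed from the trace alone (the real helper is sourced from the e2e test library and may differ; the sleep 0/4/8 backoff and the loop bounds are inferred from the retry attempts visible above):

    kubectl_bin() {
        local LAST_OUT LAST_ERR
        LAST_OUT=$(mktemp)
        LAST_ERR=$(mktemp)
        local exit_status=0
        local timeout=4
        for i in $(seq 0 2); do
            set +e
            kubectl "$@" >"$LAST_OUT" 2>"$LAST_ERR"
            exit_status=$?
            set -e
            if [ "$exit_status" -ne 0 ]; then
                # failed attempt: surface the captured output, back off, retry
                cat "$LAST_OUT"
                cat "$LAST_ERR"
                sleep $((timeout * i))
            else
                break
            fi
        done
        cat "$LAST_OUT"
        cat "$LAST_ERR"
        rm -f "$LAST_OUT" "$LAST_ERR"
        return $exit_status
    }

This is why the failing "kubectl get crd/null" probe above is attempted three times, with sleep 0, 4 and 8 between attempts, before kubectl_bin finally returns 1.
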
+ cat /tmp/tmp.OIA1hRSnAM + rm /tmp/tmp.Adf1vPjiul /tmp/tmp.OIA1hRSnAM + return 0 + deploy_operator + desc 'start PSMDB operator: perconalab/percona-server-mongodb-operator:PR-1961-5b0cc669' + set +o xtrace ----------------------------------------------------------------------------------- start PSMDB operator: perconalab/percona-server-mongodb-operator:PR-1961-5b0cc669 ----------------------------------------------------------------------------------- + local cr_file + '[' -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/pitr/conf/crd.yaml ']' + cr_file=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/deploy/crd.yaml + kubectl_bin apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/deploy/crd.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.DHBTQujyCp ++ mktemp + local LAST_ERR=/tmp/tmp.YV8UAfkTkL + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/deploy/crd.yaml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.DHBTQujyCp customresourcedefinition.apiextensions.k8s.io/perconaservermongodbbackups.psmdb.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaservermongodbrestores.psmdb.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaservermongodbs.psmdb.percona.com serverside-applied + cat /tmp/tmp.YV8UAfkTkL + rm /tmp/tmp.DHBTQujyCp /tmp/tmp.YV8UAfkTkL + return 0 + '[' -n psmdb-operator ']' + apply_rbac cw-rbac + local operator_namespace=psmdb-operator + local rbac=cw-rbac + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/deploy/cw-rbac.yaml + sed -e 's^namespace: .*^namespace: psmdb-operator^' + kubectl_bin apply -n psmdb-operator -f - ++ mktemp + local LAST_OUT=/tmp/tmp.g3D6rSYEv6 ++ mktemp + local LAST_ERR=/tmp/tmp.kN86EDkzF5 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -n psmdb-operator -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.g3D6rSYEv6 clusterrole.rbac.authorization.k8s.io/percona-server-mongodb-operator created serviceaccount/percona-server-mongodb-operator created clusterrolebinding.rbac.authorization.k8s.io/service-account-percona-server-mongodb-operator created + cat /tmp/tmp.kN86EDkzF5 + rm /tmp/tmp.g3D6rSYEv6 /tmp/tmp.kN86EDkzF5 + return 0 + yq eval ' (.spec.template.spec.containers[].image = "perconalab/percona-server-mongodb-operator:PR-1961-5b0cc669") | ((.. | select(.[] == "DISABLE_TELEMETRY")) |= .value="true") | ((.. 
| select(.[] == "LOG_LEVEL")) |= .value="DEBUG")' /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/deploy/cw-operator.yaml + kubectl_bin apply -n psmdb-operator -f - ++ mktemp + local LAST_OUT=/tmp/tmp.yB7gn1B0AM ++ mktemp + local LAST_ERR=/tmp/tmp.aTLoyJxQ2T + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -n psmdb-operator -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.yB7gn1B0AM deployment.apps/percona-server-mongodb-operator created + cat /tmp/tmp.aTLoyJxQ2T + rm /tmp/tmp.yB7gn1B0AM /tmp/tmp.aTLoyJxQ2T + return 0 + sleep 20 ++ get_operator_pod ++ kubectl_bin get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.4NDVFRzfyh +++ mktemp ++ local LAST_ERR=/tmp/tmp.x8wpkQLRJA ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.4NDVFRzfyh ++ cat /tmp/tmp.x8wpkQLRJA ++ rm /tmp/tmp.4NDVFRzfyh /tmp/tmp.x8wpkQLRJA ++ return 0 + wait_operator_pod percona-server-mongodb-operator-5464c7ccdb-wshnk + local pod=percona-server-mongodb-operator-5464c7ccdb-wshnk + set +o xtrace waiting for pod/percona-server-mongodb-operator-5464c7ccdb-wshnk to be ready.OK + echo 'Print operator info from log' Print operator info from log + grep 'Manager starting up' ++ get_operator_pod ++ kubectl_bin get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.LjhKSBcJwV +++ mktemp ++ local LAST_ERR=/tmp/tmp.3aTY1BbFCd ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.LjhKSBcJwV ++ cat /tmp/tmp.3aTY1BbFCd ++ rm /tmp/tmp.LjhKSBcJwV /tmp/tmp.3aTY1BbFCd ++ return 0 + kubectl_bin logs -n psmdb-operator percona-server-mongodb-operator-5464c7ccdb-wshnk ++ mktemp + local LAST_OUT=/tmp/tmp.gJVAOsz9yx ++ mktemp + local LAST_ERR=/tmp/tmp.kK0D7MkRSo + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl logs -n psmdb-operator percona-server-mongodb-operator-5464c7ccdb-wshnk + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.gJVAOsz9yx + cat /tmp/tmp.kK0D7MkRSo + rm /tmp/tmp.gJVAOsz9yx /tmp/tmp.kK0D7MkRSo + return 0 2025-12-04T20:28:17.317Z INFO setup Manager starting up {"gitCommit": "5b0cc6694bb3fee55fa4cc155ac44e1b0423e748", "gitBranch": "PR-1961-5b0cc669", "buildTime": "", "goVersion": "go1.25.5", "os": "linux", "arch": "amd64"} + create_namespace pitr-6260 + local namespace=pitr-6260 + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + destroy_chaos_mesh ++ tail -n1 ++ helm list --all-namespaces --filter chaos-mesh ++ awk '-F ' '{print $2}' ++ sed s/NAMESPACE// + local chaos_mesh_ns= + desc 'destroy chaos-mesh' + set +o xtrace ----------------------------------------------------------------------------------- destroy chaos-mesh ----------------------------------------------------------------------------------- + '[' -n '' ']' ++ kubectl get MutatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 
30 kubectl delete MutatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep validate-auth ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl api-resources ++ grep chaos-mesh ++ awk '{print $1}' ++ kubectl get crd ++ awk '{print $1}' ++ grep chaos-mesh.org + timeout 30 kubectl delete crd error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrolebinding ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrolebinding error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrole ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrole error: resource(s) were provided, but no name was specified + : + desc 'cleaned up all old namespaces' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up all old namespaces ----------------------------------------------------------------------------------- + kubectl_bin get ns + '[' -n '' ']' + desc 'cleaned up old namespaces pitr-6260' + egrep -v '^kube-|^default|Terminating|psmdb-operator|openshift|^gke-|^gmp-|^NAME' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up old namespaces pitr-6260 ----------------------------------------------------------------------------------- + kubectl_bin delete namespace pitr-6260 --ignore-not-found + awk '{print$1}' ++ mktemp + local LAST_OUT=/tmp/tmp.fma1LqyvRw ++ mktemp + xargs kubectl delete ns egrep: warning: egrep is obsolescent; using grep -E + local LAST_ERR=/tmp/tmp.pi7UpwJs3P + local exit_status=0 + local timeout=4 ++ seq 0 2 ++ mktemp + for i in $(seq 0 2) + set +e + kubectl get ns + local LAST_OUT=/tmp/tmp.XLSmuBasq7 ++ mktemp + local LAST_ERR=/tmp/tmp.3AnIjFJVgm + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete namespace pitr-6260 --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.fma1LqyvRw + cat /tmp/tmp.pi7UpwJs3P + rm /tmp/tmp.fma1LqyvRw /tmp/tmp.pi7UpwJs3P + return 0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.XLSmuBasq7 + cat /tmp/tmp.3AnIjFJVgm + rm /tmp/tmp.XLSmuBasq7 /tmp/tmp.3AnIjFJVgm + return 0 + kubectl_bin wait --for=delete namespace pitr-6260 ++ mktemp + local LAST_OUT=/tmp/tmp.eeGCC0nR7y ++ mktemp + local LAST_ERR=/tmp/tmp.9yRV9Ak07A + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete namespace pitr-6260 error: resource(s) were provided, but no name was specified + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.eeGCC0nR7y + cat /tmp/tmp.9yRV9Ak07A + rm /tmp/tmp.eeGCC0nR7y /tmp/tmp.9yRV9Ak07A + return 0 + desc 'create namespace pitr-6260' + set +o xtrace ----------------------------------------------------------------------------------- create namespace pitr-6260 ----------------------------------------------------------------------------------- + kubectl_bin create namespace pitr-6260 ++ mktemp + local LAST_OUT=/tmp/tmp.a4oz5WQljh ++ mktemp + local LAST_ERR=/tmp/tmp.l8aO5t6lGQ + 
local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl create namespace pitr-6260 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.a4oz5WQljh namespace/pitr-6260 created + cat /tmp/tmp.l8aO5t6lGQ + rm /tmp/tmp.a4oz5WQljh /tmp/tmp.l8aO5t6lGQ + return 0 + set_kube_ctx pitr-6260 + local namespace=pitr-6260 ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.GMnViutaDm +++ mktemp ++ local LAST_ERR=/tmp/tmp.QtbSOMijJj ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.GMnViutaDm ++ cat /tmp/tmp.QtbSOMijJj ++ rm /tmp/tmp.GMnViutaDm /tmp/tmp.QtbSOMijJj ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-1961-5b0cc669-2-cluster3 --namespace=pitr-6260 ++ mktemp + local LAST_OUT=/tmp/tmp.C7ZgmLStKJ ++ mktemp + local LAST_ERR=/tmp/tmp.iVanAgRLcI + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-1961-5b0cc669-2-cluster3 --namespace=pitr-6260 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.C7ZgmLStKJ Context "gke_cloud-dev-112233_us-central1-a_jen-psmdb-1961-5b0cc669-2-cluster3" modified. + cat /tmp/tmp.iVanAgRLcI + rm /tmp/tmp.C7ZgmLStKJ /tmp/tmp.iVanAgRLcI + return 0 + deploy_minio + desc 'install Minio' + set +o xtrace ----------------------------------------------------------------------------------- install Minio ----------------------------------------------------------------------------------- + helm uninstall minio-service Error: uninstall: Release not loaded: minio-service: release: not found + : + helm repo remove minio "minio" has been removed from your repositories + helm repo add minio https://charts.min.io/ "minio" has been added to your repositories + retry 10 60 helm install minio-service --version 5.4.0 --set replicas=1 --set mode=standalone --set resources.requests.memory=256Mi --set rootUser=rootuser --set rootPassword=rootpass123 --set 'users[0].accessKey=some-access-key' --set 'users[0].secretKey=some-secret-key' --set 'users[0].policy=consoleAdmin' --set service.type=ClusterIP --set configPathmc=/tmp/.minio/ --set persistence.size=2G --set securityContext.enabled=false minio/minio + local max=10 + local delay=60 + shift 2 + local n=1 + helm install minio-service --version 5.4.0 --set replicas=1 --set mode=standalone --set resources.requests.memory=256Mi --set rootUser=rootuser --set rootPassword=rootpass123 --set 'users[0].accessKey=some-access-key' --set 'users[0].secretKey=some-secret-key' --set 'users[0].policy=consoleAdmin' --set service.type=ClusterIP --set configPathmc=/tmp/.minio/ --set persistence.size=2G --set securityContext.enabled=false minio/minio NAME: minio-service LAST DEPLOYED: Thu Dec 4 20:29:02 2025 NAMESPACE: pitr-6260 STATUS: deployed REVISION: 1 TEST SUITE: None NOTES: MinIO can be accessed via port 9000 on the following DNS name from within your cluster: minio-service.pitr-6260.cluster.local To access MinIO from localhost, run the below commands: 1. export POD_NAME=$(kubectl get pods --namespace pitr-6260 -l "release=minio-service" -o jsonpath="{.items[0].metadata.name}") 2. 
kubectl port-forward $POD_NAME 9000 --namespace pitr-6260 Read more about port forwarding here: http://kubernetes.io/docs/user-guide/kubectl/kubectl_port-forward/ You can now access MinIO server on http://localhost:9000. Follow the below steps to connect to MinIO server with mc client: 1. Download the MinIO mc client - https://min.io/docs/minio/linux/reference/minio-mc.html#quickstart 2. export MC_HOST_minio-service-local=http://$(kubectl get secret --namespace pitr-6260 minio-service -o jsonpath="{.data.rootUser}" | base64 --decode):$(kubectl get secret --namespace pitr-6260 minio-service -o jsonpath="{.data.rootPassword}" | base64 --decode)@localhost:9000 3. mc ls minio-service-local ++ kubectl_bin get pods --selector=release=minio-service -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.s8TMLkbisg +++ mktemp ++ local LAST_ERR=/tmp/tmp.zMaCy4oBkz ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=release=minio-service -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.s8TMLkbisg ++ cat /tmp/tmp.zMaCy4oBkz ++ rm /tmp/tmp.s8TMLkbisg /tmp/tmp.zMaCy4oBkz ++ return 0 + MINIO_POD=minio-service-d9589b474-8tss5 + wait_pod minio-service-d9589b474-8tss5 + local pod=minio-service-d9589b474-8tss5 + set +o xtrace waiting for pod/minio-service-d9589b474-8tss5 to be ready.OK + '[' -n psmdb-operator ']' + kubectl_bin create svc -n psmdb-operator externalname minio-service --external-name=minio-service.pitr-6260.svc.cluster.local --tcp=9000 ++ mktemp + local LAST_OUT=/tmp/tmp.YOF7YO9et7 ++ mktemp + local LAST_ERR=/tmp/tmp.A4H9lnvVt4 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl create svc -n psmdb-operator externalname minio-service --external-name=minio-service.pitr-6260.svc.cluster.local --tcp=9000 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.YOF7YO9et7 service/minio-service created + cat /tmp/tmp.A4H9lnvVt4 + rm /tmp/tmp.YOF7YO9et7 /tmp/tmp.A4H9lnvVt4 + return 0 + create_minio_bucket operator-testing + local bucket=operator-testing + kubectl_bin run -i --rm aws-cli --image=perconalab/awscli --restart=Never -- bash -c 'AWS_ACCESS_KEY_ID=some-access-key AWS_SECRET_ACCESS_KEY=some-secret-key AWS_DEFAULT_REGION=us-east-1 /usr/bin/aws --endpoint-url http://minio-service:9000 s3 mb s3://operator-testing' ++ mktemp + local LAST_OUT=/tmp/tmp.Aeg3HfUY32 ++ mktemp + local LAST_ERR=/tmp/tmp.Mcl9FnTnLB + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl run -i --rm aws-cli --image=perconalab/awscli --restart=Never -- bash -c 'AWS_ACCESS_KEY_ID=some-access-key AWS_SECRET_ACCESS_KEY=some-secret-key AWS_DEFAULT_REGION=us-east-1 /usr/bin/aws --endpoint-url http://minio-service:9000 s3 mb s3://operator-testing' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Aeg3HfUY32 make_bucket: operator-testing pod "aws-cli" deleted from pitr-6260 namespace + cat /tmp/tmp.Mcl9FnTnLB + rm /tmp/tmp.Aeg3HfUY32 /tmp/tmp.Mcl9FnTnLB + return 0 + desc 'create secrets and start client' + set +o xtrace ----------------------------------------------------------------------------------- create secrets and start client ----------------------------------------------------------------------------------- + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/conf/secrets.yml -f 
/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/conf/client.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/conf/minio-secret.yml ++ mktemp + local LAST_OUT=/tmp/tmp.TOoSeXPgMp ++ mktemp + local LAST_ERR=/tmp/tmp.r54DOmp2Cb + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/conf/secrets.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/conf/client.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/conf/minio-secret.yml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.TOoSeXPgMp secret/some-users created deployment.apps/psmdb-client created secret/minio-secret created + cat /tmp/tmp.r54DOmp2Cb + rm /tmp/tmp.TOoSeXPgMp /tmp/tmp.r54DOmp2Cb + return 0 + cluster=some-name-rs0 + desc 'create first PSMDB cluster some-name-rs0' + set +o xtrace ----------------------------------------------------------------------------------- create first PSMDB cluster some-name-rs0 ----------------------------------------------------------------------------------- + apply_cluster /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/pitr/conf/some-name-rs0.yml + '[' -z '' ']' + cat_config /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/pitr/conf/some-name-rs0.yml + kubectl_bin apply -f - + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/pitr/conf/some-name-rs0.yml + yq eval '(.spec | select(.image == null)).image = "perconalab/percona-server-mongodb-operator:main-mongod8.0"' + yq eval '(.spec | select(has("pmm"))).pmm.image = "percona/pmm-client:2.44.1-1"' ++ mktemp + yq eval '(.spec | select(has("initImage"))).initImage = "perconalab/percona-server-mongodb-operator:PR-1961-5b0cc669"' + yq eval '(.spec | select(has("backup"))).backup.image = "percona/percona-backup-mongodb:2.11.0"' + yq eval '.spec.upgradeOptions.apply="Never"' + local LAST_OUT=/tmp/tmp.RJQISbO44g ++ mktemp + local LAST_ERR=/tmp/tmp.cO4IKIZclG + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.RJQISbO44g perconaservermongodb.psmdb.percona.com/some-name created + cat /tmp/tmp.cO4IKIZclG + rm /tmp/tmp.RJQISbO44g /tmp/tmp.cO4IKIZclG + return 0 + desc 'check if all 3 Pods started' + set +o xtrace ----------------------------------------------------------------------------------- check if all 3 Pods started ----------------------------------------------------------------------------------- + wait_for_running some-name-rs0 3 + local name=some-name-rs0 + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=some-name ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod some-name-rs0-0 + local pod=some-name-rs0-0 + set +o xtrace waiting for pod/some-name-rs0-0 to be ready.......OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod some-name-rs0-1 + local pod=some-name-rs0-1 + set +o xtrace waiting for pod/some-name-rs0-1 to be ready....OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.KwQEH2uJVm +++ mktemp ++ local LAST_ERR=/tmp/tmp.91r9z1Da5v ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb 
some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.KwQEH2uJVm ++ cat /tmp/tmp.91r9z1Da5v ++ rm /tmp/tmp.KwQEH2uJVm /tmp/tmp.91r9z1Da5v ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-rs0-2 + local pod=some-name-rs0-2 + set +o xtrace waiting for pod/some-name-rs0-2 to be ready.....OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.568Fcnu3lP +++ mktemp ++ local LAST_ERR=/tmp/tmp.aEtE9GGzl8 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.568Fcnu3lP ++ cat /tmp/tmp.aEtE9GGzl8 ++ rm /tmp/tmp.568Fcnu3lP /tmp/tmp.aEtE9GGzl8 ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.UJsnwBeinP +++ mktemp ++ local LAST_ERR=/tmp/tmp.JMNtd28CEk ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.UJsnwBeinP ++ cat /tmp/tmp.JMNtd28CEk ++ rm /tmp/tmp.UJsnwBeinP /tmp/tmp.JMNtd28CEk ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness. + desc 'check if statefulset created with expected config' + set +o xtrace ----------------------------------------------------------------------------------- check if statefulset created with expected config ----------------------------------------------------------------------------------- + compare_kubectl statefulset/some-name-rs0 + local resource=statefulset/some-name-rs0 + local postfix= + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/pitr/compare/statefulset_some-name-rs0.yml + local new_result=/tmp/tmp.gmxU6WaGGk/statefulset_some-name-rs0.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/pitr/compare/statefulset_some-name-rs0-oc.yml ']' + kubectl_bin get -o yaml statefulset/some-name-rs0 + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. 
| select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("pitr-6260", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. == "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.sxhQUqOEih ++ mktemp + local LAST_ERR=/tmp/tmp.zfHAnauWi4 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get -o yaml statefulset/some-name-rs0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.sxhQUqOEih + cat /tmp/tmp.zfHAnauWi4 + rm /tmp/tmp.sxhQUqOEih /tmp/tmp.zfHAnauWi4 + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.gmxU6WaGGk/statefulset_some-name-rs0.yml + version_gt 1.22 ++ echo '1.31 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.gmxU6WaGGk/statefulset_some-name-rs0.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.gmxU6WaGGk/statefulset_some-name-rs0.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/pitr/compare/statefulset_some-name-rs0.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/pitr/compare/statefulset_some-name-rs0.yml /tmp/tmp.gmxU6WaGGk/statefulset_some-name-rs0.yml + log 'compare_kubectl: statefulset/some-name-rs0 OK' + set +o xtrace [2025-12-04T20:31:09+0000] compare_kubectl: statefulset/some-name-rs0 OK + write_initial_data + desc 'create user myApp' + set +o xtrace ----------------------------------------------------------------------------------- create user myApp ----------------------------------------------------------------------------------- + run_mongo 'db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})' userAdmin:userAdmin123456@some-name-rs0.pitr-6260 + local 'command=db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})' + local uri=userAdmin:userAdmin123456@some-name-rs0.pitr-6260 + local driver=mongodb+srv + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.uy3tDI9Fqh +++ mktemp ++ local LAST_ERR=/tmp/tmp.WLDldYKB3K ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.uy3tDI9Fqh ++ cat /tmp/tmp.WLDldYKB3K ++ rm /tmp/tmp.uy3tDI9Fqh /tmp/tmp.WLDldYKB3K ++ return 0 + local client_container=psmdb-client-696897d69b-cg7fs 
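

[annotation] The run_mongo calls here, and throughout the rest of the test, follow one pattern: locate the psmdb-client pod, then pipe a shell snippet into the legacy mongo shell inside it. A minimal sketch assuming the rs0 replica set and no TLS, as in this run (the real function also handles cfg replica sets and extra mongo flags, as hinted by the *cfg* test and the mongo_flag variable in the trace):

    run_mongo() {
        local command=$1    # e.g. 'use myApp\n db.test.insert({ x: 100500 })'
        local uri=$2        # e.g. myApp:myPass@some-name-rs0.pitr-6260
        local driver=${3:-mongodb+srv}
        local suffix=${4:-.svc.cluster.local}
        # find the client pod created from e2e-tests/conf/client.yml
        local client_container
        client_container=$(kubectl get pods --selector=name=psmdb-client \
            -o 'jsonpath={.items[].metadata.name}')
        # pipe the snippet into the mongo shell inside the client pod;
        # TLS is disabled and the replica set name is passed explicitly
        kubectl exec "$client_container" -- bash -c \
            "printf '$command\n' | mongo $driver://$uri$suffix/admin?ssl=false\&replicaSet=rs0"
    }

Called with the mongodb+srv driver and a service-level URI, as in the createUser step above, it reaches whichever member is primary; called with the plain mongodb driver and a pod-specific URI (some-name-rs0-0.some-name-rs0...), it reads from a single member, which is how the per-pod find() comparisons below verify replication.
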
+ local mongo_flag= + [[ userAdmin:userAdmin123456@some-name-rs0.pitr-6260 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-696897d69b-cg7fs -- bash -c 'printf '\''db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})\n'\'' | mongo mongodb+srv://userAdmin:userAdmin123456@some-name-rs0.pitr-6260.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.lrdn3vpmLB ++ mktemp + local LAST_ERR=/tmp/tmp.6J4SKbuKDl + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-cg7fs -- bash -c 'printf '\''db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})\n'\'' | mongo mongodb+srv://userAdmin:userAdmin123456@some-name-rs0.pitr-6260.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.lrdn3vpmLB Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-rs0-2.some-name-rs0.pitr-6260.svc.cluster.local:27017,some-name-rs0-1.some-name-rs0.pitr-6260.svc.cluster.local:27017,some-name-rs0-0.some-name-rs0.pitr-6260.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("a4679946-9181-422e-aaa4-76d9d8a9cfd9") } Percona Server for MongoDB server version: v8.0.16-5 WARNING: shell and server versions do not match Successfully added user: { "user" : "myApp", "roles" : [ { "db" : "myApp", "role" : "readWrite" } ] } bye + cat /tmp/tmp.6J4SKbuKDl + rm /tmp/tmp.lrdn3vpmLB /tmp/tmp.6J4SKbuKDl + return 0 + sleep 2 + write_document + local cmp_postfix= + local cluster_name=some-name-rs0 + desc 'write initial data, read from all' + set +o xtrace ----------------------------------------------------------------------------------- write initial data, read from all ----------------------------------------------------------------------------------- + run_mongo 'use myApp\n db.test.insert({ x: 100500 })' myApp:myPass@some-name-rs0.pitr-6260 + local 'command=use myApp\n db.test.insert({ x: 100500 })' + local uri=myApp:myPass@some-name-rs0.pitr-6260 + local driver=mongodb+srv + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.s0hZooAsH6 +++ mktemp ++ local LAST_ERR=/tmp/tmp.HCsKM9nYQl ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.s0hZooAsH6 ++ cat /tmp/tmp.HCsKM9nYQl ++ rm /tmp/tmp.s0hZooAsH6 /tmp/tmp.HCsKM9nYQl ++ return 0 + local client_container=psmdb-client-696897d69b-cg7fs + local mongo_flag= + [[ myApp:myPass@some-name-rs0.pitr-6260 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-696897d69b-cg7fs -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.pitr-6260.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.4V23SD6sPa ++ mktemp + local LAST_ERR=/tmp/tmp.1lTQ9SL399 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-cg7fs -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo 
mongodb+srv://myApp:myPass@some-name-rs0.pitr-6260.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.4V23SD6sPa Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-rs0-2.some-name-rs0.pitr-6260.svc.cluster.local:27017,some-name-rs0-1.some-name-rs0.pitr-6260.svc.cluster.local:27017,some-name-rs0-0.some-name-rs0.pitr-6260.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("1d68b286-b615-496a-9c75-01d30765cc82") } Percona Server for MongoDB server version: v8.0.16-5 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.1lTQ9SL399 + rm /tmp/tmp.4V23SD6sPa /tmp/tmp.1lTQ9SL399 + return 0 + minikube_sleep + sleep_time=10 + [[ '' == 1 ]] + compare_mongo_cmd find myApp:myPass@some-name-rs0-0.some-name-rs0.pitr-6260 '' + local command=find + local uri=myApp:myPass@some-name-rs0-0.some-name-rs0.pitr-6260 + local postfix= + local suffix= + local database=myApp + local collection=test + local sort= + local tls=false + local 'full_command=db.test.find()' + [[ -n '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-12-04T20:31:18+0000] running db.test.find() in myApp + [[ false == \t\r\u\e ]] + mongo_command=run_mongo + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-0.some-name-rs0.pitr-6260 mongodb '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-0.some-name-rs0.pitr-6260 + local driver=mongodb + local suffix=.svc.cluster.local + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp egrep: warning: egrep is obsolescent; using grep -E ++ local LAST_OUT=/tmp/tmp.jJq809KADr +++ mktemp ++ local LAST_ERR=/tmp/tmp.jfzTWsRLNW ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.jJq809KADr ++ cat /tmp/tmp.jfzTWsRLNW ++ rm /tmp/tmp.jJq809KADr /tmp/tmp.jfzTWsRLNW ++ return 0 + local client_container=psmdb-client-696897d69b-cg7fs + local mongo_flag= + [[ myApp:myPass@some-name-rs0-0.some-name-rs0.pitr-6260 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-696897d69b-cg7fs -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-0.some-name-rs0.pitr-6260.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.1zhRi5ubXT ++ mktemp + local LAST_ERR=/tmp/tmp.hyzZUTEUvm + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-cg7fs -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-0.some-name-rs0.pitr-6260.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.1zhRi5ubXT + cat /tmp/tmp.hyzZUTEUvm + rm /tmp/tmp.1zhRi5ubXT /tmp/tmp.hyzZUTEUvm + 
return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/pitr/compare/find.json /tmp/tmp.gmxU6WaGGk/find + compare_mongo_cmd find myApp:myPass@some-name-rs0-1.some-name-rs0.pitr-6260 '' + local command=find + local uri=myApp:myPass@some-name-rs0-1.some-name-rs0.pitr-6260 + local postfix= + local suffix= + local database=myApp + local collection=test + local sort= + local tls=false + local 'full_command=db.test.find()' + [[ -n '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-12-04T20:31:20+0000] running db.test.find() in myApp + [[ false == \t\r\u\e ]] + mongo_command=run_mongo + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-1.some-name-rs0.pitr-6260 mongodb '' + local 'command=use myApp\n db.test.find()' + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' + local uri=myApp:myPass@some-name-rs0-1.some-name-rs0.pitr-6260 + local driver=mongodb + local suffix=.svc.cluster.local + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' egrep: warning: egrep is obsolescent; using grep -E ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.kp2cQM0soK +++ mktemp ++ local LAST_ERR=/tmp/tmp.pS8SLOkUtJ ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.kp2cQM0soK ++ cat /tmp/tmp.pS8SLOkUtJ ++ rm /tmp/tmp.kp2cQM0soK /tmp/tmp.pS8SLOkUtJ ++ return 0 + local client_container=psmdb-client-696897d69b-cg7fs + local mongo_flag= + [[ myApp:myPass@some-name-rs0-1.some-name-rs0.pitr-6260 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-696897d69b-cg7fs -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-1.some-name-rs0.pitr-6260.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.M9NkJvfaGI ++ mktemp + local LAST_ERR=/tmp/tmp.TkGLhq0DIQ + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-cg7fs -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-1.some-name-rs0.pitr-6260.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.M9NkJvfaGI + cat /tmp/tmp.TkGLhq0DIQ + rm /tmp/tmp.M9NkJvfaGI /tmp/tmp.TkGLhq0DIQ + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/pitr/compare/find.json /tmp/tmp.gmxU6WaGGk/find + compare_mongo_cmd find myApp:myPass@some-name-rs0-2.some-name-rs0.pitr-6260 '' + local command=find + local uri=myApp:myPass@some-name-rs0-2.some-name-rs0.pitr-6260 + local postfix= + local suffix= + local database=myApp + local collection=test + local sort= + local tls=false + local 'full_command=db.test.find()' + [[ -n '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-12-04T20:31:23+0000] running db.test.find() in myApp + [[ false == \t\r\u\e ]] + mongo_command=run_mongo + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-2.some-name-rs0.pitr-6260 mongodb '' + local 'command=use 
myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-2.some-name-rs0.pitr-6260 + local driver=mongodb + local suffix=.svc.cluster.local + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp egrep: warning: egrep is obsolescent; using grep -E ++ local LAST_OUT=/tmp/tmp.nzhXLMhgjI +++ mktemp ++ local LAST_ERR=/tmp/tmp.0oCOTwplbu ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.nzhXLMhgjI ++ cat /tmp/tmp.0oCOTwplbu ++ rm /tmp/tmp.nzhXLMhgjI /tmp/tmp.0oCOTwplbu ++ return 0 + local client_container=psmdb-client-696897d69b-cg7fs + local mongo_flag= + [[ myApp:myPass@some-name-rs0-2.some-name-rs0.pitr-6260 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-696897d69b-cg7fs -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-2.some-name-rs0.pitr-6260.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.FCQiFrRUB3 ++ mktemp + local LAST_ERR=/tmp/tmp.aGxrVgqUh3 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-cg7fs -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-2.some-name-rs0.pitr-6260.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.FCQiFrRUB3 + cat /tmp/tmp.aGxrVgqUh3 + rm /tmp/tmp.FCQiFrRUB3 /tmp/tmp.aGxrVgqUh3 + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/pitr/compare/find.json /tmp/tmp.gmxU6WaGGk/find + wait_backup_agent some-name-rs0-0 + local agent_pod=some-name-rs0-0 + set +o xtrace waiting for pbm-agent to be ready in some-name-rs0-0...2025-12-04T20:30:59.000+0000 I listening for the commands + wait_backup_agent some-name-rs0-1 + local agent_pod=some-name-rs0-1 + set +o xtrace waiting for pbm-agent to be ready in some-name-rs0-1...2025-12-04T20:31:04.000+0000 I listening for the commands + wait_backup_agent some-name-rs0-2 + local agent_pod=some-name-rs0-2 + set +o xtrace waiting for pbm-agent to be ready in some-name-rs0-2...2025-12-04T20:31:05.000+0000 I listening for the commands + backup_name_minio=backup-minio + run_mongo 'use myApp\n db.test2.insert({ x: 100501 })' myApp:myPass@some-name-rs0.pitr-6260 + local 'command=use myApp\n db.test2.insert({ x: 100501 })' + local uri=myApp:myPass@some-name-rs0.pitr-6260 + local driver=mongodb+srv + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.6AZOk6DX8H +++ mktemp ++ local LAST_ERR=/tmp/tmp.gjz3xMMBpi ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.6AZOk6DX8H ++ cat /tmp/tmp.gjz3xMMBpi ++ rm 
/tmp/tmp.6AZOk6DX8H /tmp/tmp.gjz3xMMBpi ++ return 0 + local client_container=psmdb-client-696897d69b-cg7fs + local mongo_flag= + [[ myApp:myPass@some-name-rs0.pitr-6260 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-696897d69b-cg7fs -- bash -c 'printf '\''use myApp\n db.test2.insert({ x: 100501 })\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.pitr-6260.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.x8QV7aB2Ec ++ mktemp + local LAST_ERR=/tmp/tmp.LClqSicBTS + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-cg7fs -- bash -c 'printf '\''use myApp\n db.test2.insert({ x: 100501 })\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.pitr-6260.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.x8QV7aB2Ec Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-rs0-0.some-name-rs0.pitr-6260.svc.cluster.local:27017,some-name-rs0-2.some-name-rs0.pitr-6260.svc.cluster.local:27017,some-name-rs0-1.some-name-rs0.pitr-6260.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("768d8614-fbcb-400f-a9be-00fc64c9ccb1") } Percona Server for MongoDB server version: v8.0.16-5 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.LClqSicBTS + rm /tmp/tmp.x8QV7aB2Ec /tmp/tmp.LClqSicBTS + return 0 + run_backup backup-minio 0 + local name=backup-minio + local idx=0 + desc 'run backup backup-minio-0' + set +o xtrace ----------------------------------------------------------------------------------- run backup backup-minio-0 ----------------------------------------------------------------------------------- + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/pitr/conf/backup-minio.yml + /usr/sbin/sed -e 's/name:/name: backup-minio-0/' + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.2fbJ1oGnUV ++ mktemp + local LAST_ERR=/tmp/tmp.VRemkafXiZ + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.2fbJ1oGnUV perconaservermongodbbackup.psmdb.percona.com/backup-minio-0 created + cat /tmp/tmp.VRemkafXiZ + rm /tmp/tmp.2fbJ1oGnUV /tmp/tmp.VRemkafXiZ + return 0 + wait_backup backup-minio-0 + local backup_name=backup-minio-0 + local target_state=ready + set +o xtrace waiting for backup-minio-0 to reach ready state.......OK + write_document -2nd + local cmp_postfix=-2nd + local cluster_name=some-name-rs0 + desc 'write initial data, read from all' + set +o xtrace ----------------------------------------------------------------------------------- write initial data, read from all ----------------------------------------------------------------------------------- + run_mongo 'use myApp\n db.test.insert({ x: 100500 })' myApp:myPass@some-name-rs0.pitr-6260 + local 'command=use myApp\n db.test.insert({ x: 100500 })' + local uri=myApp:myPass@some-name-rs0.pitr-6260 + local driver=mongodb+srv + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.rUTsoIqnXj +++ mktemp ++ local LAST_ERR=/tmp/tmp.UGaP57z1EQ ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e 
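

[annotation] run_backup and wait_backup, which produced and then tracked the backup-minio-0 resource above, reduce to rendering a manifest and polling the custom resource state. A sketch under assumptions visible in the log only (the bare "name:" field in conf/backup-minio.yml that sed fills in, the psmdb-backup short name for perconaservermongodbbackups.psmdb.percona.com, and the test_dir variable are all inferred, not guaranteed):

    run_backup() {
        local name=$1 idx=$2
        # the template ships with an empty "name:" that gets filled in here;
        # test_dir is assumed to point at e2e-tests/pitr in this run
        sed -e "s/name:/name: ${name}-${idx}/" "${test_dir}/conf/backup-minio.yml" \
            | kubectl apply -f -
    }

    wait_backup() {
        local backup_name=$1
        echo -n "waiting for $backup_name to reach ready state"
        # poll the PerconaServerMongoDBBackup resource until the operator
        # marks it ready, printing one dot per second as in the log above
        until [ "$(kubectl get psmdb-backup "$backup_name" \
            -o 'jsonpath={.status.state}' 2>/dev/null)" = "ready" ]; do
            echo -n .
            sleep 1
        done
        echo OK
    }

The second write_document -2nd pass that follows inserts the same document again after the base backup completes, so the later point-in-time restore can distinguish data covered by the snapshot from data that only exists in the oplog.
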
++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.rUTsoIqnXj ++ cat /tmp/tmp.UGaP57z1EQ ++ rm /tmp/tmp.rUTsoIqnXj /tmp/tmp.UGaP57z1EQ ++ return 0 + local client_container=psmdb-client-696897d69b-cg7fs + local mongo_flag= + [[ myApp:myPass@some-name-rs0.pitr-6260 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-696897d69b-cg7fs -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.pitr-6260.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.QrNrLOsaCH ++ mktemp + local LAST_ERR=/tmp/tmp.k7Npjnfhyd + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-cg7fs -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.pitr-6260.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.QrNrLOsaCH Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-rs0-2.some-name-rs0.pitr-6260.svc.cluster.local:27017,some-name-rs0-1.some-name-rs0.pitr-6260.svc.cluster.local:27017,some-name-rs0-0.some-name-rs0.pitr-6260.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("4061224f-4c29-4d07-8480-5adf7e2fae47") } Percona Server for MongoDB server version: v8.0.16-5 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.k7Npjnfhyd + rm /tmp/tmp.QrNrLOsaCH /tmp/tmp.k7Npjnfhyd + return 0 + minikube_sleep + sleep_time=10 + [[ '' == 1 ]] + compare_mongo_cmd find myApp:myPass@some-name-rs0-0.some-name-rs0.pitr-6260 -2nd + local command=find + local uri=myApp:myPass@some-name-rs0-0.some-name-rs0.pitr-6260 + local postfix=-2nd + local suffix= + local database=myApp + local collection=test + local sort= + local tls=false + local 'full_command=db.test.find()' + [[ -n '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-12-04T20:31:50+0000] running db.test.find() in myApp + [[ false == \t\r\u\e ]] + mongo_command=run_mongo + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-0.some-name-rs0.pitr-6260 mongodb '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-0.some-name-rs0.pitr-6260 + local driver=mongodb + local suffix=.svc.cluster.local + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' +++ mktemp ++ local LAST_OUT=/tmp/tmp.oyjSbbaNsZ +++ mktemp ++ local LAST_ERR=/tmp/tmp.C7fTc2QGzx ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 egrep: warning: egrep is obsolescent; using grep -E ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.oyjSbbaNsZ ++ cat /tmp/tmp.C7fTc2QGzx ++ rm /tmp/tmp.oyjSbbaNsZ /tmp/tmp.C7fTc2QGzx ++ return 0 + local 
client_container=psmdb-client-696897d69b-cg7fs + local mongo_flag= + [[ myApp:myPass@some-name-rs0-0.some-name-rs0.pitr-6260 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-696897d69b-cg7fs -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-0.some-name-rs0.pitr-6260.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.EVfex4VYme ++ mktemp + local LAST_ERR=/tmp/tmp.5BDcgncEh6 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-cg7fs -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-0.some-name-rs0.pitr-6260.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.EVfex4VYme + cat /tmp/tmp.5BDcgncEh6 + rm /tmp/tmp.EVfex4VYme /tmp/tmp.5BDcgncEh6 + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/pitr/compare/find-2nd.json /tmp/tmp.gmxU6WaGGk/find-2nd + compare_mongo_cmd find myApp:myPass@some-name-rs0-1.some-name-rs0.pitr-6260 -2nd + local command=find + local uri=myApp:myPass@some-name-rs0-1.some-name-rs0.pitr-6260 + local postfix=-2nd + local suffix= + local database=myApp + local collection=test + local sort= + local tls=false + local 'full_command=db.test.find()' + [[ -n '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-12-04T20:31:53+0000] running db.test.find() in myApp + [[ false == \t\r\u\e ]] + mongo_command=run_mongo + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-1.some-name-rs0.pitr-6260 mongodb '' + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-1.some-name-rs0.pitr-6260 + local driver=mongodb + local suffix=.svc.cluster.local + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp egrep: warning: egrep is obsolescent; using grep -E ++ local LAST_OUT=/tmp/tmp.EBtdQcRQyR +++ mktemp ++ local LAST_ERR=/tmp/tmp.4HiCdRYwKP ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.EBtdQcRQyR ++ cat /tmp/tmp.4HiCdRYwKP ++ rm /tmp/tmp.EBtdQcRQyR /tmp/tmp.4HiCdRYwKP ++ return 0 + local client_container=psmdb-client-696897d69b-cg7fs + local mongo_flag= + [[ myApp:myPass@some-name-rs0-1.some-name-rs0.pitr-6260 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-696897d69b-cg7fs -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-1.some-name-rs0.pitr-6260.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.umc2HITC2W ++ mktemp + local LAST_ERR=/tmp/tmp.kjbvTs4dqi + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-cg7fs -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo 
mongodb://myApp:myPass@some-name-rs0-1.some-name-rs0.pitr-6260.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.umc2HITC2W + cat /tmp/tmp.kjbvTs4dqi + rm /tmp/tmp.umc2HITC2W /tmp/tmp.kjbvTs4dqi + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/pitr/compare/find-2nd.json /tmp/tmp.gmxU6WaGGk/find-2nd + compare_mongo_cmd find myApp:myPass@some-name-rs0-2.some-name-rs0.pitr-6260 -2nd + local command=find + local uri=myApp:myPass@some-name-rs0-2.some-name-rs0.pitr-6260 + local postfix=-2nd + local suffix= + local database=myApp + local collection=test + local sort= + local tls=false + local 'full_command=db.test.find()' + [[ -n '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-12-04T20:31:57+0000] running db.test.find() in myApp + [[ false == \t\r\u\e ]] + mongo_command=run_mongo + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-2.some-name-rs0.pitr-6260 mongodb '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-2.some-name-rs0.pitr-6260 + local driver=mongodb + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' + local suffix=.svc.cluster.local + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp egrep: warning: egrep is obsolescent; using grep -E ++ local LAST_OUT=/tmp/tmp.W7rMwoNh8u +++ mktemp ++ local LAST_ERR=/tmp/tmp.IkAX4Jwrme ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.W7rMwoNh8u ++ cat /tmp/tmp.IkAX4Jwrme ++ rm /tmp/tmp.W7rMwoNh8u /tmp/tmp.IkAX4Jwrme ++ return 0 + local client_container=psmdb-client-696897d69b-cg7fs + local mongo_flag= + [[ myApp:myPass@some-name-rs0-2.some-name-rs0.pitr-6260 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-696897d69b-cg7fs -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-2.some-name-rs0.pitr-6260.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.jDrw2WZDVz ++ mktemp + local LAST_ERR=/tmp/tmp.u8w8MGjb0d + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-cg7fs -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-2.some-name-rs0.pitr-6260.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.jDrw2WZDVz + cat /tmp/tmp.u8w8MGjb0d + rm /tmp/tmp.jDrw2WZDVz /tmp/tmp.u8w8MGjb0d + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/pitr/compare/find-2nd.json /tmp/tmp.gmxU6WaGGk/find-2nd + sleep 2 ++ run_mongo 'new Date().toISOString()' myApp:myPass@some-name-rs0.pitr-6260 mongodb '' --quiet ++ grep -E -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match' ++ local 'command=new Date().toISOString()' 
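The step unfolding here records the server's current time, which the test then uses as the target for the PITR restore by date. Reassembled from the trace (with the noise-filter list abbreviated), the capture looks like this:

# Grab "now" from mongod as an ISO string, strip shell noise, and
# reformat "2025-12-04T20:32:05..." into "2025-12-04 20:32:05".
time_now=$(run_mongo 'new Date().toISOString()' myApp:myPass@some-name-rs0.pitr-6260 mongodb '' --quiet \
	| grep -E -v 'I NETWORK|W NETWORK|Percona Server for MongoDB|connecting to:|Implicit session:' \
	| cut -c1-19 \
	| tr T ' ')
# Restore backup-minio-0 to that moment, comparing against the -2nd
# dataset and selectively restoring around the test2 collection.
check_recovery backup-minio-0 date "$time_now" -2nd '' some-name test2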
++ local uri=myApp:myPass@some-name-rs0.pitr-6260 ++ local driver=mongodb ++ local suffix=.svc.cluster.local ++ cut -c1-19 ++ tr T ' ' +++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.rmUTeLvPqd ++++ mktemp +++ local LAST_ERR=/tmp/tmp.10jk3zf6oj +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.rmUTeLvPqd +++ cat /tmp/tmp.10jk3zf6oj +++ rm /tmp/tmp.rmUTeLvPqd /tmp/tmp.10jk3zf6oj +++ return 0 ++ local client_container=psmdb-client-696897d69b-cg7fs ++ local mongo_flag=--quiet ++ [[ myApp:myPass@some-name-rs0.pitr-6260 == *cfg* ]] ++ replica_set=rs0 ++ kubectl_bin exec psmdb-client-696897d69b-cg7fs -- bash -c 'printf '\''new Date().toISOString()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0.pitr-6260.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 --quiet' +++ mktemp ++ local LAST_OUT=/tmp/tmp.DNrxdSmggS +++ mktemp ++ local LAST_ERR=/tmp/tmp.vbtWe4eCrm ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl exec psmdb-client-696897d69b-cg7fs -- bash -c 'printf '\''new Date().toISOString()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0.pitr-6260.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 --quiet' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.DNrxdSmggS ++ cat /tmp/tmp.vbtWe4eCrm ++ rm /tmp/tmp.DNrxdSmggS /tmp/tmp.vbtWe4eCrm ++ return 0 + time_now='2025-12-04 20:32:05' + check_recovery backup-minio-0 date '2025-12-04 20:32:05' -2nd '' some-name test2 + local backup_name=backup-minio-0 + local restore_type=date + local 'restore_date=2025-12-04 20:32:05' + local cmp_postfix=-2nd + local backupSource= + local cluster_name=some-name + local selective_collection=test2 + local restore_name=restore-backup-minio-0 + local restore_file=restore.yml + local cluster + cluster=some-name-rs0 + desc 'write more data before restore by date' + set +o xtrace ----------------------------------------------------------------------------------- write more data before restore by date ----------------------------------------------------------------------------------- + sleep 60 + run_mongo 'use myApp\n db.test.insert({ x: 100501 })' myApp:myPass@some-name-rs0.pitr-6260 + local 'command=use myApp\n db.test.insert({ x: 100501 })' + local uri=myApp:myPass@some-name-rs0.pitr-6260 + local driver=mongodb+srv + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.2eyWK3XcOp +++ mktemp ++ local LAST_ERR=/tmp/tmp.LrDTEyakqU ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.2eyWK3XcOp ++ cat /tmp/tmp.LrDTEyakqU ++ rm /tmp/tmp.2eyWK3XcOp /tmp/tmp.LrDTEyakqU ++ return 0 + local client_container=psmdb-client-696897d69b-cg7fs + local mongo_flag= + [[ myApp:myPass@some-name-rs0.pitr-6260 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-696897d69b-cg7fs -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100501 })\n'\'' | mongo 
mongodb+srv://myApp:myPass@some-name-rs0.pitr-6260.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.l51RtezKmy ++ mktemp + local LAST_ERR=/tmp/tmp.wQ8tqT2YYv + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-cg7fs -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100501 })\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.pitr-6260.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.l51RtezKmy Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-rs0-2.some-name-rs0.pitr-6260.svc.cluster.local:27017,some-name-rs0-1.some-name-rs0.pitr-6260.svc.cluster.local:27017,some-name-rs0-0.some-name-rs0.pitr-6260.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("2743142e-2890-4cee-8a79-ec427895d8cb") } Percona Server for MongoDB server version: v8.0.16-5 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.wQ8tqT2YYv + rm /tmp/tmp.l51RtezKmy /tmp/tmp.wQ8tqT2YYv + return 0 + '[' -n test2 ']' + run_mongo 'use myApp\n db.test2.drop()' myApp:myPass@some-name-rs0.pitr-6260 + local 'command=use myApp\n db.test2.drop()' + local uri=myApp:myPass@some-name-rs0.pitr-6260 + local driver=mongodb+srv + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.00KD1QpziZ +++ mktemp ++ local LAST_ERR=/tmp/tmp.7yIgrwXaKm ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.00KD1QpziZ ++ cat /tmp/tmp.7yIgrwXaKm ++ rm /tmp/tmp.00KD1QpziZ /tmp/tmp.7yIgrwXaKm ++ return 0 + local client_container=psmdb-client-696897d69b-cg7fs + local mongo_flag= + [[ myApp:myPass@some-name-rs0.pitr-6260 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-696897d69b-cg7fs -- bash -c 'printf '\''use myApp\n db.test2.drop()\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.pitr-6260.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.ND48vlBYsp ++ mktemp + local LAST_ERR=/tmp/tmp.fXNKfsWYb4 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-cg7fs -- bash -c 'printf '\''use myApp\n db.test2.drop()\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.pitr-6260.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.ND48vlBYsp Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-rs0-2.some-name-rs0.pitr-6260.svc.cluster.local:27017,some-name-rs0-1.some-name-rs0.pitr-6260.svc.cluster.local:27017,some-name-rs0-0.some-name-rs0.pitr-6260.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("4fb0c2cc-eeb4-4806-b03f-eb7852599df5") } Percona Server for MongoDB server version: v8.0.16-5 WARNING: shell and server versions do not match switched to db myApp true bye + cat /tmp/tmp.fXNKfsWYb4 + rm /tmp/tmp.ND48vlBYsp /tmp/tmp.fXNKfsWYb4 + return 0 + desc 'waiting 
for chunks to be uploaded' + set +o xtrace ----------------------------------------------------------------------------------- waiting for chunks to be uploaded ----------------------------------------------------------------------------------- + sleep 120 + desc 'check restore by date' + set +o xtrace ----------------------------------------------------------------------------------- check restore by date ----------------------------------------------------------------------------------- + '[' -z '' ']' + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/pitr/conf/restore.yml + /usr/sbin/sed -e 's/name:/name: restore-backup-minio-0/' + /usr/sbin/sed -e 's/backupName:/backupName: backup-minio-0/' + /usr/sbin/sed -e 's/clusterName:/clusterName: some-name/' + /usr/sbin/sed -e 's/pitrType:/type: date/' + '[' -z '2025-12-04 20:32:05' ']' + /usr/sbin/sed -e 's/date:/date: 2025-12-04 20:32:05/' + /usr/sbin/sed -e /backupSource/,+2d + '[' -n test2 ']' + yq eval '.spec.selective = {"namespaces": ["myApp.test"], "withUsersAndRoles": true}' + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.jL1H3wPJc4 ++ mktemp + local LAST_ERR=/tmp/tmp.dpjiHAtrzY + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.jL1H3wPJc4 perconaservermongodbrestore.psmdb.percona.com/restore-backup-minio-0 created + cat /tmp/tmp.dpjiHAtrzY + rm /tmp/tmp.jL1H3wPJc4 /tmp/tmp.dpjiHAtrzY + return 0 + wait_restore backup-minio-0 some-name + local backup_name=backup-minio-0 + local cluster_name=some-name + local target_state=ready + local wait_cluster_consistency=1 + local wait_time=1780 + local ok_if_ready=0 + set +o xtrace Waiting for the psmdb-restore/restore-backup-minio-0 object to be created.OK Waiting psmdb-restore/restore-backup-minio-0 to reach state "ready" .OK after 0 minutes + [[ 1 -eq 1 ]] + wait_cluster_consistency some-name + local cluster_name=some-name + local wait_time=32 + retry=0 + sleep 7 + echo -n 'waiting for cluster readyness' waiting for cluster readyness++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.3zLVj6Tk8K +++ mktemp ++ local LAST_ERR=/tmp/tmp.Kvn2lJxszk ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.3zLVj6Tk8K ++ cat /tmp/tmp.Kvn2lJxszk ++ rm /tmp/tmp.3zLVj6Tk8K /tmp/tmp.Kvn2lJxszk ++ return 0 + [[ ready == \r\e\a\d\y ]] + echo .OK .OK + echo + set -o xtrace + '[' -n test2 ']' ++ collection_exists test2 ./e2e-tests/pitr/run: line 108: collection_exists: command not found + [[ '' == \t\r\u\e ]] + compare_mongo_cmd find myApp:myPass@some-name-rs0-0.some-name-rs0.pitr-6260 -2nd + local command=find + local uri=myApp:myPass@some-name-rs0-0.some-name-rs0.pitr-6260 + local postfix=-2nd + local suffix= + local database=myApp + local collection=test + local sort= + local tls=false + local 'full_command=db.test.find()' + [[ -n '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-12-04T20:35:39+0000] running db.test.find() in myApp + [[ false == \t\r\u\e ]] + mongo_command=run_mongo + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-0.some-name-rs0.pitr-6260 mongodb '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-0.some-name-rs0.pitr-6260 + egrep -v 'I NETWORK|W 
NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' + local driver=mongodb + local suffix=.svc.cluster.local + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp egrep: warning: egrep is obsolescent; using grep -E ++ local LAST_OUT=/tmp/tmp.xMst2xcVFi +++ mktemp ++ local LAST_ERR=/tmp/tmp.HV9Nlcvjaw ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.xMst2xcVFi ++ cat /tmp/tmp.HV9Nlcvjaw ++ rm /tmp/tmp.xMst2xcVFi /tmp/tmp.HV9Nlcvjaw ++ return 0 + local client_container=psmdb-client-696897d69b-cg7fs + local mongo_flag= + [[ myApp:myPass@some-name-rs0-0.some-name-rs0.pitr-6260 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-696897d69b-cg7fs -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-0.some-name-rs0.pitr-6260.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.p9Om1rZleT ++ mktemp + local LAST_ERR=/tmp/tmp.jkZB3Cmkd7 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-cg7fs -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-0.some-name-rs0.pitr-6260.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.p9Om1rZleT + cat /tmp/tmp.jkZB3Cmkd7 + rm /tmp/tmp.p9Om1rZleT /tmp/tmp.jkZB3Cmkd7 + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/pitr/compare/find-2nd.json /tmp/tmp.gmxU6WaGGk/find-2nd + compare_mongo_cmd find myApp:myPass@some-name-rs0-1.some-name-rs0.pitr-6260 -2nd + local command=find + local uri=myApp:myPass@some-name-rs0-1.some-name-rs0.pitr-6260 + local postfix=-2nd + local suffix= + local database=myApp + local collection=test + local sort= + local tls=false + local 'full_command=db.test.find()' + [[ -n '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-12-04T20:35:42+0000] running db.test.find() in myApp + [[ false == \t\r\u\e ]] + mongo_command=run_mongo + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-1.some-name-rs0.pitr-6260 mongodb '' + local 'command=use myApp\n db.test.find()' + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' + local uri=myApp:myPass@some-name-rs0-1.some-name-rs0.pitr-6260 + local driver=mongodb + local suffix=.svc.cluster.local + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp egrep: warning: egrep is obsolescent; using grep -E ++ local LAST_OUT=/tmp/tmp.ei1nlwm8c3 +++ mktemp ++ local LAST_ERR=/tmp/tmp.QPaqkc94X5 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 
'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.ei1nlwm8c3 ++ cat /tmp/tmp.QPaqkc94X5 ++ rm /tmp/tmp.ei1nlwm8c3 /tmp/tmp.QPaqkc94X5 ++ return 0 + local client_container=psmdb-client-696897d69b-cg7fs + local mongo_flag= + [[ myApp:myPass@some-name-rs0-1.some-name-rs0.pitr-6260 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-696897d69b-cg7fs -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-1.some-name-rs0.pitr-6260.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.h93kBNpuk6 ++ mktemp + local LAST_ERR=/tmp/tmp.cXaPwWyUjQ + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-cg7fs -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-1.some-name-rs0.pitr-6260.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.h93kBNpuk6 + cat /tmp/tmp.cXaPwWyUjQ + rm /tmp/tmp.h93kBNpuk6 /tmp/tmp.cXaPwWyUjQ + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/pitr/compare/find-2nd.json /tmp/tmp.gmxU6WaGGk/find-2nd + compare_mongo_cmd find myApp:myPass@some-name-rs0-2.some-name-rs0.pitr-6260 -2nd + local command=find + local uri=myApp:myPass@some-name-rs0-2.some-name-rs0.pitr-6260 + local postfix=-2nd + local suffix= + local database=myApp + local collection=test + local sort= + local tls=false + local 'full_command=db.test.find()' + [[ -n '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-12-04T20:35:44+0000] running db.test.find() in myApp + [[ false == \t\r\u\e ]] + mongo_command=run_mongo + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-2.some-name-rs0.pitr-6260 mongodb '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-2.some-name-rs0.pitr-6260 + local driver=mongodb + local suffix=.svc.cluster.local + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp egrep: warning: egrep is obsolescent; using grep -E ++ local LAST_OUT=/tmp/tmp.exuLcL8qBZ +++ mktemp ++ local LAST_ERR=/tmp/tmp.3uyXau95NS ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.exuLcL8qBZ ++ cat /tmp/tmp.3uyXau95NS ++ rm /tmp/tmp.exuLcL8qBZ /tmp/tmp.3uyXau95NS ++ return 0 + local client_container=psmdb-client-696897d69b-cg7fs + local mongo_flag= + [[ myApp:myPass@some-name-rs0-2.some-name-rs0.pitr-6260 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-696897d69b-cg7fs -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-2.some-name-rs0.pitr-6260.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.36L78tviwz ++ mktemp + local LAST_ERR=/tmp/tmp.KWx8X1XP08 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for 
i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-cg7fs -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-2.some-name-rs0.pitr-6260.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.36L78tviwz + cat /tmp/tmp.KWx8X1XP08 + rm /tmp/tmp.36L78tviwz /tmp/tmp.KWx8X1XP08 + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/pitr/compare/find-2nd.json /tmp/tmp.gmxU6WaGGk/find-2nd + run_backup backup-minio 1 + local name=backup-minio + local idx=1 + desc 'run backup backup-minio-1' + set +o xtrace ----------------------------------------------------------------------------------- run backup backup-minio-1 ----------------------------------------------------------------------------------- + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/pitr/conf/backup-minio.yml + /usr/sbin/sed -e 's/name:/name: backup-minio-1/' + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.ZPpRKRJKzh ++ mktemp + local LAST_ERR=/tmp/tmp.1WmajXTS4G + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.ZPpRKRJKzh perconaservermongodbbackup.psmdb.percona.com/backup-minio-1 created + cat /tmp/tmp.1WmajXTS4G + rm /tmp/tmp.ZPpRKRJKzh /tmp/tmp.1WmajXTS4G + return 0 + wait_backup backup-minio-1 + local backup_name=backup-minio-1 + local target_state=ready + set +o xtrace waiting for backup-minio-1 to reach ready state......OK + compare_latest_restorable_time some-name-rs0 backup-minio-1 + local cluster=some-name-rs0 + local backup_name=backup-minio-1 + local latest_restorable_time + local backup_time ++ get_latest_restorable_time some-name-rs0 ++ local cluster=some-name-rs0 ++ local first_timestamp ++ local second_timestamp ++ local retry=0 ++ [[ '' != '' ]] +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.XqG6yxHDiS ++++ mktemp +++ local LAST_ERR=/tmp/tmp.G2VtzNWkP4 +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.XqG6yxHDiS +++ cat /tmp/tmp.G2VtzNWkP4 +++ rm /tmp/tmp.XqG6yxHDiS /tmp/tmp.G2VtzNWkP4 +++ return 0 ++ first_timestamp=1764880520 ++ sleep 5 ++ [[ 1764880520 != '' ]] ++ [[ 1764880520 != \n\u\l\l ]] +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.7PCtEEy2p6 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.v1QWlpH7Vo +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.7PCtEEy2p6 +++ cat /tmp/tmp.v1QWlpH7Vo +++ rm /tmp/tmp.7PCtEEy2p6 /tmp/tmp.v1QWlpH7Vo +++ return 0 ++ second_timestamp=1764880520 ++ let retry+=1 ++ [[ 1 -gt 30 ]] ++ [[ 1764880520 != '' ]] ++ [[ 1764880520 != \n\u\l\l ]] ++ [[ 1764880520 == 1764880520 ]] ++ /usr/sbin/date -u -d @1764880520 +%Y-%m-%dT%H:%M:%SZ + latest_restorable_time=2025-12-04T20:35:20Z ++ get_latest_restorable_time_from_backup_object 
backup-minio-1 ++ local backup_name=backup-minio-1 ++ local latestRestorableTime ++ local retry=0 ++ [[ '' != '' ]] ++ sleep 5 +++ kubectl_bin get psmdb-backup backup-minio-1 -o 'jsonpath={.status.latestRestorableTime}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.kcjPQiRLOn ++++ mktemp +++ local LAST_ERR=/tmp/tmp.MquRBb1GwR +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get psmdb-backup backup-minio-1 -o 'jsonpath={.status.latestRestorableTime}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.kcjPQiRLOn +++ cat /tmp/tmp.MquRBb1GwR +++ rm /tmp/tmp.kcjPQiRLOn /tmp/tmp.MquRBb1GwR +++ return 0 ++ latestRestorableTime=2025-12-04T20:35:20Z ++ let retry+=1 ++ [[ 1 -gt 30 ]] ++ [[ 2025-12-04T20:35:20Z != '' ]] ++ [[ 2025-12-04T20:35:20Z != \n\u\l\l ]] ++ echo 2025-12-04T20:35:20Z + backup_time=2025-12-04T20:35:20Z + [[ 2025-12-04T20:35:20Z != \2\0\2\5\-\1\2\-\0\4\T\2\0\:\3\5\:\2\0\Z ]] + check_recovery backup-minio-1 latest '' -3rd '' some-name + local backup_name=backup-minio-1 + local restore_type=latest + local restore_date= + local cmp_postfix=-3rd + local backupSource= + local cluster_name=some-name + local selective_collection= + local restore_name=restore-backup-minio-1 + local restore_file=restore.yml + local cluster + cluster=some-name-rs0 + desc 'write more data before restore by latest' + set +o xtrace ----------------------------------------------------------------------------------- write more data before restore by latest ----------------------------------------------------------------------------------- + sleep 60 + run_mongo 'use myApp\n db.test.insert({ x: 100501 })' myApp:myPass@some-name-rs0.pitr-6260 + local 'command=use myApp\n db.test.insert({ x: 100501 })' + local uri=myApp:myPass@some-name-rs0.pitr-6260 + local driver=mongodb+srv + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.fORwU7gB4n +++ mktemp ++ local LAST_ERR=/tmp/tmp.yGwQzb9T60 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.fORwU7gB4n ++ cat /tmp/tmp.yGwQzb9T60 ++ rm /tmp/tmp.fORwU7gB4n /tmp/tmp.yGwQzb9T60 ++ return 0 + local client_container=psmdb-client-696897d69b-cg7fs + local mongo_flag= + [[ myApp:myPass@some-name-rs0.pitr-6260 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-696897d69b-cg7fs -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100501 })\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.pitr-6260.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.9qa8ri3Dcs ++ mktemp + local LAST_ERR=/tmp/tmp.JyVtApH2GA + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-cg7fs -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100501 })\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.pitr-6260.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.9qa8ri3Dcs Percona Server for MongoDB shell version v4.4.29-28 connecting to: 
mongodb://some-name-rs0-1.some-name-rs0.pitr-6260.svc.cluster.local:27017,some-name-rs0-0.some-name-rs0.pitr-6260.svc.cluster.local:27017,some-name-rs0-2.some-name-rs0.pitr-6260.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("fdc5a7c9-c15e-4a1d-ada1-069ef3a86114") } Percona Server for MongoDB server version: v8.0.16-5 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.JyVtApH2GA + rm /tmp/tmp.9qa8ri3Dcs /tmp/tmp.JyVtApH2GA + return 0 + '[' -n '' ']' + desc 'waiting for chunks to be uploaded' + set +o xtrace ----------------------------------------------------------------------------------- waiting for chunks to be uploaded ----------------------------------------------------------------------------------- + sleep 120 + desc 'check restore by latest' + set +o xtrace ----------------------------------------------------------------------------------- check restore by latest ----------------------------------------------------------------------------------- + '[' -z '' ']' + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/pitr/conf/restore.yml + /usr/sbin/sed -e 's/name:/name: restore-backup-minio-1/' + /usr/sbin/sed -e 's/backupName:/backupName: backup-minio-1/' + /usr/sbin/sed -e 's/clusterName:/clusterName: some-name/' + /usr/sbin/sed -e 's/pitrType:/type: latest/' + /usr/sbin/sed -e /backupSource/,+2d + '[' -z '' ']' + /usr/sbin/sed -e /date:/d + '[' -n '' ']' + yq + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.3UXoB1iRAd ++ mktemp + local LAST_ERR=/tmp/tmp.8aURGmRm1o + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.3UXoB1iRAd perconaservermongodbrestore.psmdb.percona.com/restore-backup-minio-1 created + cat /tmp/tmp.8aURGmRm1o + rm /tmp/tmp.3UXoB1iRAd /tmp/tmp.8aURGmRm1o + return 0 + wait_restore backup-minio-1 some-name + local backup_name=backup-minio-1 + local cluster_name=some-name + local target_state=ready + local wait_cluster_consistency=1 + local wait_time=1780 + local ok_if_ready=0 + set +o xtrace Waiting for the psmdb-restore/restore-backup-minio-1 object to be created.OK Waiting psmdb-restore/restore-backup-minio-1 to reach state "ready" .OK after 0 minutes + [[ 1 -eq 1 ]] + wait_cluster_consistency some-name + local cluster_name=some-name + local wait_time=32 + retry=0 + sleep 7 + echo -n 'waiting for cluster readyness' waiting for cluster readyness++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.qb3rxd6JV6 +++ mktemp ++ local LAST_ERR=/tmp/tmp.0z7cyz5swH ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.qb3rxd6JV6 ++ cat /tmp/tmp.0z7cyz5swH ++ rm /tmp/tmp.qb3rxd6JV6 /tmp/tmp.0z7cyz5swH ++ return 0 + [[ ready == \r\e\a\d\y ]] + echo .OK .OK + echo + set -o xtrace + '[' -n '' ']' + compare_mongo_cmd find myApp:myPass@some-name-rs0-0.some-name-rs0.pitr-6260 -3rd + local command=find + local uri=myApp:myPass@some-name-rs0-0.some-name-rs0.pitr-6260 + local postfix=-3rd + local suffix= + local database=myApp + local collection=test + local sort= + local tls=false + local 'full_command=db.test.find()' + [[ -n '' ]] + log 'running 
db.test.find() in myApp' + set +o xtrace [2025-12-04T20:39:47+0000] running db.test.find() in myApp + [[ false == \t\r\u\e ]] + mongo_command=run_mongo + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-0.some-name-rs0.pitr-6260 mongodb '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-0.some-name-rs0.pitr-6260 + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' + local driver=mongodb + local suffix=.svc.cluster.local + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp egrep: warning: egrep is obsolescent; using grep -E ++ local LAST_OUT=/tmp/tmp.7ZLhPzX7Fw +++ mktemp ++ local LAST_ERR=/tmp/tmp.5LDtOTM4ho ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.7ZLhPzX7Fw ++ cat /tmp/tmp.5LDtOTM4ho ++ rm /tmp/tmp.7ZLhPzX7Fw /tmp/tmp.5LDtOTM4ho ++ return 0 + local client_container=psmdb-client-696897d69b-cg7fs + local mongo_flag= + [[ myApp:myPass@some-name-rs0-0.some-name-rs0.pitr-6260 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-696897d69b-cg7fs -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-0.some-name-rs0.pitr-6260.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.Llfh4ajgB6 ++ mktemp + local LAST_ERR=/tmp/tmp.tV16kO85oV + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-cg7fs -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-0.some-name-rs0.pitr-6260.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Llfh4ajgB6 + cat /tmp/tmp.tV16kO85oV + rm /tmp/tmp.Llfh4ajgB6 /tmp/tmp.tV16kO85oV + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/pitr/compare/find-3rd.json /tmp/tmp.gmxU6WaGGk/find-3rd + compare_mongo_cmd find myApp:myPass@some-name-rs0-1.some-name-rs0.pitr-6260 -3rd + local command=find + local uri=myApp:myPass@some-name-rs0-1.some-name-rs0.pitr-6260 + local postfix=-3rd + local suffix= + local database=myApp + local collection=test + local sort= + local tls=false + local 'full_command=db.test.find()' + [[ -n '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-12-04T20:39:50+0000] running db.test.find() in myApp + [[ false == \t\r\u\e ]] + mongo_command=run_mongo + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-1.some-name-rs0.pitr-6260 mongodb '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-1.some-name-rs0.pitr-6260 + local driver=mongodb + local suffix=.svc.cluster.local + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ 
kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' egrep: warning: egrep is obsolescent; using grep -E +++ mktemp ++ local LAST_OUT=/tmp/tmp.AEN2fz5CYS +++ mktemp ++ local LAST_ERR=/tmp/tmp.Vh1oCjr65v ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.AEN2fz5CYS ++ cat /tmp/tmp.Vh1oCjr65v ++ rm /tmp/tmp.AEN2fz5CYS /tmp/tmp.Vh1oCjr65v ++ return 0 + local client_container=psmdb-client-696897d69b-cg7fs + local mongo_flag= + [[ myApp:myPass@some-name-rs0-1.some-name-rs0.pitr-6260 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-696897d69b-cg7fs -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-1.some-name-rs0.pitr-6260.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.PQl1EzQb9e ++ mktemp + local LAST_ERR=/tmp/tmp.LgWxuQYwLG + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-cg7fs -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-1.some-name-rs0.pitr-6260.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.PQl1EzQb9e + cat /tmp/tmp.LgWxuQYwLG + rm /tmp/tmp.PQl1EzQb9e /tmp/tmp.LgWxuQYwLG + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/pitr/compare/find-3rd.json /tmp/tmp.gmxU6WaGGk/find-3rd + compare_mongo_cmd find myApp:myPass@some-name-rs0-2.some-name-rs0.pitr-6260 -3rd + local command=find + local uri=myApp:myPass@some-name-rs0-2.some-name-rs0.pitr-6260 + local postfix=-3rd + local suffix= + local database=myApp + local collection=test + local sort= + local tls=false + local 'full_command=db.test.find()' + [[ -n '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-12-04T20:39:53+0000] running db.test.find() in myApp + [[ false == \t\r\u\e ]] + mongo_command=run_mongo + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-2.some-name-rs0.pitr-6260 mongodb '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-2.some-name-rs0.pitr-6260 + local driver=mongodb + local suffix=.svc.cluster.local + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp egrep: warning: egrep is obsolescent; using grep -E ++ local LAST_OUT=/tmp/tmp.4HKNbUGNdL +++ mktemp ++ local LAST_ERR=/tmp/tmp.nJWhK1Z3nJ ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.4HKNbUGNdL ++ cat /tmp/tmp.nJWhK1Z3nJ ++ rm /tmp/tmp.4HKNbUGNdL /tmp/tmp.nJWhK1Z3nJ ++ return 0 + local client_container=psmdb-client-696897d69b-cg7fs + local mongo_flag= + [[ myApp:myPass@some-name-rs0-2.some-name-rs0.pitr-6260 == *cfg* ]] + replica_set=rs0 + 
kubectl_bin exec psmdb-client-696897d69b-cg7fs -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-2.some-name-rs0.pitr-6260.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.KOv0WtmaVx ++ mktemp + local LAST_ERR=/tmp/tmp.1bBLt8IN57 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-cg7fs -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-2.some-name-rs0.pitr-6260.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.KOv0WtmaVx + cat /tmp/tmp.1bBLt8IN57 + rm /tmp/tmp.KOv0WtmaVx /tmp/tmp.1bBLt8IN57 + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/pitr/compare/find-3rd.json /tmp/tmp.gmxU6WaGGk/find-3rd + desc 'restore pitr using backupSource' + set +o xtrace ----------------------------------------------------------------------------------- restore pitr using backupSource ----------------------------------------------------------------------------------- + reset_collection + desc 'reset data' + set +o xtrace ----------------------------------------------------------------------------------- reset data ----------------------------------------------------------------------------------- + run_mongo 'use myApp\n db.test.remove({})' myApp:myPass@some-name-rs0.pitr-6260 + local 'command=use myApp\n db.test.remove({})' + local uri=myApp:myPass@some-name-rs0.pitr-6260 + local driver=mongodb+srv + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.zD5EvNWs8o +++ mktemp ++ local LAST_ERR=/tmp/tmp.zpzqNs0F4c ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.zD5EvNWs8o ++ cat /tmp/tmp.zpzqNs0F4c ++ rm /tmp/tmp.zD5EvNWs8o /tmp/tmp.zpzqNs0F4c ++ return 0 + local client_container=psmdb-client-696897d69b-cg7fs + local mongo_flag= + [[ myApp:myPass@some-name-rs0.pitr-6260 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-696897d69b-cg7fs -- bash -c 'printf '\''use myApp\n db.test.remove({})\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.pitr-6260.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.ljC0NKmh5c ++ mktemp + local LAST_ERR=/tmp/tmp.1DObaeKRst + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-cg7fs -- bash -c 'printf '\''use myApp\n db.test.remove({})\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.pitr-6260.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.ljC0NKmh5c Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-rs0-0.some-name-rs0.pitr-6260.svc.cluster.local:27017,some-name-rs0-2.some-name-rs0.pitr-6260.svc.cluster.local:27017,some-name-rs0-1.some-name-rs0.pitr-6260.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("5bee4695-0d45-4710-a8ec-8eed8b3348cc") } Percona Server for MongoDB server version: v8.0.16-5 WARNING: shell 
and server versions do not match switched to db myApp WriteResult({ "nRemoved" : 3 }) bye + cat /tmp/tmp.1DObaeKRst + rm /tmp/tmp.ljC0NKmh5c /tmp/tmp.1DObaeKRst + return 0 + sleep 2 + write_document + local cmp_postfix= + local cluster_name=some-name-rs0 + desc 'write initial data, read from all' + set +o xtrace ----------------------------------------------------------------------------------- write initial data, read from all ----------------------------------------------------------------------------------- + run_mongo 'use myApp\n db.test.insert({ x: 100500 })' myApp:myPass@some-name-rs0.pitr-6260 + local 'command=use myApp\n db.test.insert({ x: 100500 })' + local uri=myApp:myPass@some-name-rs0.pitr-6260 + local driver=mongodb+srv + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.T6yMFGtMlk +++ mktemp ++ local LAST_ERR=/tmp/tmp.YdiOaHfU99 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.T6yMFGtMlk ++ cat /tmp/tmp.YdiOaHfU99 ++ rm /tmp/tmp.T6yMFGtMlk /tmp/tmp.YdiOaHfU99 ++ return 0 + local client_container=psmdb-client-696897d69b-cg7fs + local mongo_flag= + [[ myApp:myPass@some-name-rs0.pitr-6260 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-696897d69b-cg7fs -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.pitr-6260.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.5MfEOtwM8G ++ mktemp + local LAST_ERR=/tmp/tmp.2pvY08AJMd + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-cg7fs -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.pitr-6260.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.5MfEOtwM8G Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-rs0-0.some-name-rs0.pitr-6260.svc.cluster.local:27017,some-name-rs0-2.some-name-rs0.pitr-6260.svc.cluster.local:27017,some-name-rs0-1.some-name-rs0.pitr-6260.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("4db1b73c-aaa5-4307-8f32-533d5a592e75") } Percona Server for MongoDB server version: v8.0.16-5 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.2pvY08AJMd + rm /tmp/tmp.5MfEOtwM8G /tmp/tmp.2pvY08AJMd + return 0 + minikube_sleep + sleep_time=10 + [[ '' == 1 ]] + compare_mongo_cmd find myApp:myPass@some-name-rs0-0.some-name-rs0.pitr-6260 '' + local command=find + local uri=myApp:myPass@some-name-rs0-0.some-name-rs0.pitr-6260 + local postfix= + local suffix= + local database=myApp + local collection=test + local sort= + local tls=false + local 'full_command=db.test.find()' + [[ -n '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-12-04T20:40:03+0000] running db.test.find() in myApp + [[ false == \t\r\u\e ]] + mongo_command=run_mongo + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-0.some-name-rs0.pitr-6260 mongodb '' + local 'command=use myApp\n db.test.find()' 
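Each compare_mongo_cmd call in this trace follows the same normalize-and-diff pattern: run the query on one replica-set member, strip connection noise, mask run-specific values, and diff the result against a golden file. A sketch reconstructed from the trace; tmp_dir and test_dir stand in for the script's working and source directories and are assumptions here.

# Normalize mongo shell output and diff it against the expected JSON.
# ObjectId values and the random namespace ordinal in hostnames are
# masked so the comparison is stable across runs.
compare_mongo_cmd() {
	local command=$1 uri=$2 postfix=$3
	run_mongo "use myApp\n db.test.${command}()" "$uri" mongodb '' \
		| grep -E -v 'I NETWORK|W NETWORK|F NETWORK|Percona Server for MongoDB|connecting to:|Implicit session:|versions do not match' \
		| sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' \
		>"${tmp_dir}/${command}${postfix}"
	diff -u "${test_dir}/compare/${command}${postfix}.json" "${tmp_dir}/${command}${postfix}"
}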
+ local uri=myApp:myPass@some-name-rs0-0.some-name-rs0.pitr-6260 + local driver=mongodb + local suffix=.svc.cluster.local + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp egrep: warning: egrep is obsolescent; using grep -E ++ local LAST_OUT=/tmp/tmp.s6lMuY8XLQ +++ mktemp ++ local LAST_ERR=/tmp/tmp.tYpIMxraoE ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.s6lMuY8XLQ ++ cat /tmp/tmp.tYpIMxraoE ++ rm /tmp/tmp.s6lMuY8XLQ /tmp/tmp.tYpIMxraoE ++ return 0 + local client_container=psmdb-client-696897d69b-cg7fs + local mongo_flag= + [[ myApp:myPass@some-name-rs0-0.some-name-rs0.pitr-6260 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-696897d69b-cg7fs -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-0.some-name-rs0.pitr-6260.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.QEzQkUxrsi ++ mktemp + local LAST_ERR=/tmp/tmp.yi24yzslGU + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-cg7fs -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-0.some-name-rs0.pitr-6260.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.QEzQkUxrsi + cat /tmp/tmp.yi24yzslGU + rm /tmp/tmp.QEzQkUxrsi /tmp/tmp.yi24yzslGU + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/pitr/compare/find.json /tmp/tmp.gmxU6WaGGk/find + compare_mongo_cmd find myApp:myPass@some-name-rs0-1.some-name-rs0.pitr-6260 '' + local command=find + local uri=myApp:myPass@some-name-rs0-1.some-name-rs0.pitr-6260 + local postfix= + local suffix= + local database=myApp + local collection=test + local sort= + local tls=false + local 'full_command=db.test.find()' + [[ -n '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-12-04T20:40:07+0000] running db.test.find() in myApp + [[ false == \t\r\u\e ]] + mongo_command=run_mongo + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-1.some-name-rs0.pitr-6260 mongodb '' + local 'command=use myApp\n db.test.find()' + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' + local uri=myApp:myPass@some-name-rs0-1.some-name-rs0.pitr-6260 + local driver=mongodb + local suffix=.svc.cluster.local + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' egrep: warning: egrep is obsolescent; using grep -E +++ mktemp ++ local LAST_OUT=/tmp/tmp.dgbtsQpc6r +++ mktemp ++ local LAST_ERR=/tmp/tmp.sXem8IO0oO ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) 
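One genuine failure is buried earlier in this run: during the "check restore by date" step the script logged ./e2e-tests/pitr/run: line 108: collection_exists: command not found, so the selective-restore assertion degenerated into comparing an empty string with "true" and silently passed. The helper's name comes from the log; its body is absent from the script, so the following is purely a hypothetical reconstruction of what it presumably should do.

# Hypothetical body for the missing collection_exists helper seen in the
# trace ("command not found"). Assumption: it should print "true" when
# the named collection exists in myApp, "false" otherwise.
collection_exists() {
	local collection=$1
	run_mongo 'use myApp\n db.getCollectionNames()' myApp:myPass@some-name-rs0.pitr-6260 mongodb '' --quiet \
		| grep -E -v 'I NETWORK|W NETWORK|Percona Server for MongoDB|connecting to:' \
		| grep -q "\"${collection}\"" \
		&& echo true || echo false
}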
++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.dgbtsQpc6r ++ cat /tmp/tmp.sXem8IO0oO ++ rm /tmp/tmp.dgbtsQpc6r /tmp/tmp.sXem8IO0oO ++ return 0 + local client_container=psmdb-client-696897d69b-cg7fs + local mongo_flag= + [[ myApp:myPass@some-name-rs0-1.some-name-rs0.pitr-6260 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-696897d69b-cg7fs -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-1.some-name-rs0.pitr-6260.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.tR36i69jiC ++ mktemp + local LAST_ERR=/tmp/tmp.NzQ0rVhfe9 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-cg7fs -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-1.some-name-rs0.pitr-6260.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.tR36i69jiC + cat /tmp/tmp.NzQ0rVhfe9 + rm /tmp/tmp.tR36i69jiC /tmp/tmp.NzQ0rVhfe9 + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/pitr/compare/find.json /tmp/tmp.gmxU6WaGGk/find + compare_mongo_cmd find myApp:myPass@some-name-rs0-2.some-name-rs0.pitr-6260 '' + local command=find + local uri=myApp:myPass@some-name-rs0-2.some-name-rs0.pitr-6260 + local postfix= + local suffix= + local database=myApp + local collection=test + local sort= + local tls=false + local 'full_command=db.test.find()' + [[ -n '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-12-04T20:40:09+0000] running db.test.find() in myApp + [[ false == \t\r\u\e ]] + mongo_command=run_mongo + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-2.some-name-rs0.pitr-6260 mongodb '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-2.some-name-rs0.pitr-6260 + local driver=mongodb + local suffix=.svc.cluster.local + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp egrep: warning: egrep is obsolescent; using grep -E ++ local LAST_OUT=/tmp/tmp.EMjySzl6LC +++ mktemp ++ local LAST_ERR=/tmp/tmp.KH3YWrcda6 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.EMjySzl6LC ++ cat /tmp/tmp.KH3YWrcda6 ++ rm /tmp/tmp.EMjySzl6LC /tmp/tmp.KH3YWrcda6 ++ return 0 + local client_container=psmdb-client-696897d69b-cg7fs + local mongo_flag= + [[ myApp:myPass@some-name-rs0-2.some-name-rs0.pitr-6260 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-696897d69b-cg7fs -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-2.some-name-rs0.pitr-6260.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.Qkvzf3JBAd ++ mktemp + local LAST_ERR=/tmp/tmp.M2jZxauqSi + local 
exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-cg7fs -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-2.some-name-rs0.pitr-6260.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Qkvzf3JBAd + cat /tmp/tmp.M2jZxauqSi + rm /tmp/tmp.Qkvzf3JBAd /tmp/tmp.M2jZxauqSi + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/pitr/compare/find.json /tmp/tmp.gmxU6WaGGk/find + run_backup backup-minio 2 + local name=backup-minio + local idx=2 + desc 'run backup backup-minio-2' + set +o xtrace ----------------------------------------------------------------------------------- run backup backup-minio-2 ----------------------------------------------------------------------------------- + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/pitr/conf/backup-minio.yml + /usr/sbin/sed -e 's/name:/name: backup-minio-2/' + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.YQKsmedUbx ++ mktemp + local LAST_ERR=/tmp/tmp.6Sav0QtDJA + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.YQKsmedUbx perconaservermongodbbackup.psmdb.percona.com/backup-minio-2 created + cat /tmp/tmp.6Sav0QtDJA + rm /tmp/tmp.YQKsmedUbx /tmp/tmp.6Sav0QtDJA + return 0 + wait_backup backup-minio-2 + local backup_name=backup-minio-2 + local target_state=ready + set +o xtrace waiting for backup-minio-2 to reach ready state......OK + compare_latest_restorable_time some-name-rs0 backup-minio-2 + local cluster=some-name-rs0 + local backup_name=backup-minio-2 + local latest_restorable_time + local backup_time ++ get_latest_restorable_time some-name-rs0 ++ local cluster=some-name-rs0 ++ local first_timestamp ++ local second_timestamp ++ local retry=0 ++ [[ '' != '' ]] +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.kxFhUJBTnf ++++ mktemp +++ local LAST_ERR=/tmp/tmp.AVoIpFakxE +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.kxFhUJBTnf +++ cat /tmp/tmp.AVoIpFakxE +++ rm /tmp/tmp.kxFhUJBTnf /tmp/tmp.AVoIpFakxE +++ return 0 ++ first_timestamp=1764880766 ++ sleep 5 ++ [[ 1764880766 != '' ]] ++ [[ 1764880766 != \n\u\l\l ]] +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.xZWg0xB9Rz ++++ mktemp +++ local LAST_ERR=/tmp/tmp.46J68Xnvmp +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.xZWg0xB9Rz +++ cat /tmp/tmp.46J68Xnvmp +++ rm /tmp/tmp.xZWg0xB9Rz /tmp/tmp.46J68Xnvmp +++ return 0 ++ second_timestamp=1764880766 ++ let retry+=1 ++ [[ 1 -gt 30 ]] ++ [[ 1764880766 != '' ]] ++ [[ 1764880766 != \n\u\l\l ]] ++ [[ 1764880766 == 1764880766 ]] ++ /usr/sbin/date -u -d @1764880766 +%Y-%m-%dT%H:%M:%SZ + latest_restorable_time=2025-12-04T20:39:26Z ++ 
get_latest_restorable_time_from_backup_object backup-minio-2 ++ local backup_name=backup-minio-2 ++ local latestRestorableTime ++ local retry=0 ++ [[ '' != '' ]] ++ sleep 5 +++ kubectl_bin get psmdb-backup backup-minio-2 -o 'jsonpath={.status.latestRestorableTime}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.W5xxt9UOxt ++++ mktemp +++ local LAST_ERR=/tmp/tmp.T2USEAPlN6 +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get psmdb-backup backup-minio-2 -o 'jsonpath={.status.latestRestorableTime}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.W5xxt9UOxt +++ cat /tmp/tmp.T2USEAPlN6 +++ rm /tmp/tmp.W5xxt9UOxt /tmp/tmp.T2USEAPlN6 +++ return 0 ++ latestRestorableTime=2025-12-04T20:39:26Z ++ let retry+=1 ++ [[ 1 -gt 30 ]] ++ [[ 2025-12-04T20:39:26Z != '' ]] ++ [[ 2025-12-04T20:39:26Z != \n\u\l\l ]] ++ echo 2025-12-04T20:39:26Z + backup_time=2025-12-04T20:39:26Z + [[ 2025-12-04T20:39:26Z != \2\0\2\5\-\1\2\-\0\4\T\2\0\:\3\9\:\2\6\Z ]] ++ run_mongo 'new Date().toISOString()' myApp:myPass@some-name-rs0.pitr-6260 mongodb '' --quiet ++ local 'command=new Date().toISOString()' ++ local uri=myApp:myPass@some-name-rs0.pitr-6260 ++ local driver=mongodb ++ local suffix=.svc.cluster.local ++ grep -E -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match' ++ cut -c1-19 +++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ tr T ' ' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.Vm3Wb0838p ++++ mktemp +++ local LAST_ERR=/tmp/tmp.RpXCjLq9my +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.Vm3Wb0838p +++ cat /tmp/tmp.RpXCjLq9my +++ rm /tmp/tmp.Vm3Wb0838p /tmp/tmp.RpXCjLq9my +++ return 0 ++ local client_container=psmdb-client-696897d69b-cg7fs ++ local mongo_flag=--quiet ++ [[ myApp:myPass@some-name-rs0.pitr-6260 == *cfg* ]] ++ replica_set=rs0 ++ kubectl_bin exec psmdb-client-696897d69b-cg7fs -- bash -c 'printf '\''new Date().toISOString()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0.pitr-6260.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 --quiet' +++ mktemp ++ local LAST_OUT=/tmp/tmp.juuq1CBC9p +++ mktemp ++ local LAST_ERR=/tmp/tmp.6wA4N0Wh4e ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl exec psmdb-client-696897d69b-cg7fs -- bash -c 'printf '\''new Date().toISOString()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0.pitr-6260.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 --quiet' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.juuq1CBC9p ++ cat /tmp/tmp.6wA4N0Wh4e ++ rm /tmp/tmp.juuq1CBC9p /tmp/tmp.6wA4N0Wh4e ++ return 0 + time_now='2025-12-04 20:40:43' + check_recovery backup-minio-2 date '2025-12-04 20:40:43' '' backupSource some-name + local backup_name=backup-minio-2 + local restore_type=date + local 'restore_date=2025-12-04 20:40:43' + local cmp_postfix= + local backupSource=backupSource + local cluster_name=some-name + local selective_collection= + local restore_name=restore-backup-minio-2 + local restore_file=restore.yml + local cluster + cluster=some-name-rs0 + desc 'write more data before restore by date' + set +o xtrace 
----------------------------------------------------------------------------------- write more data before restore by date ----------------------------------------------------------------------------------- + sleep 60 + run_mongo 'use myApp\n db.test.insert({ x: 100501 })' myApp:myPass@some-name-rs0.pitr-6260 + local 'command=use myApp\n db.test.insert({ x: 100501 })' + local uri=myApp:myPass@some-name-rs0.pitr-6260 + local driver=mongodb+srv + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.yzhvJKI0YJ +++ mktemp ++ local LAST_ERR=/tmp/tmp.VOYPx3636J ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.yzhvJKI0YJ ++ cat /tmp/tmp.VOYPx3636J ++ rm /tmp/tmp.yzhvJKI0YJ /tmp/tmp.VOYPx3636J ++ return 0 + local client_container=psmdb-client-696897d69b-cg7fs + local mongo_flag= + [[ myApp:myPass@some-name-rs0.pitr-6260 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-696897d69b-cg7fs -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100501 })\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.pitr-6260.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.OVyr37FUeI ++ mktemp + local LAST_ERR=/tmp/tmp.6uXvtNhIih + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-cg7fs -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100501 })\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.pitr-6260.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.OVyr37FUeI Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-rs0-2.some-name-rs0.pitr-6260.svc.cluster.local:27017,some-name-rs0-1.some-name-rs0.pitr-6260.svc.cluster.local:27017,some-name-rs0-0.some-name-rs0.pitr-6260.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("60247477-c06f-4002-b00c-f09f4bbe44be") } Percona Server for MongoDB server version: v8.0.16-5 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.6uXvtNhIih + rm /tmp/tmp.OVyr37FUeI /tmp/tmp.6uXvtNhIih + return 0 + '[' -n '' ']' + desc 'waiting for chunks to be uploaded' + set +o xtrace ----------------------------------------------------------------------------------- waiting for chunks to be uploaded ----------------------------------------------------------------------------------- + sleep 120 + desc 'check restore by date' + set +o xtrace ----------------------------------------------------------------------------------- check restore by date ----------------------------------------------------------------------------------- + '[' -z backupSource ']' ++ get_backup_dest backup-minio-2 ++ local backup_name=backup-minio-2 ++ kubectl_bin get psmdb-backup backup-minio-2 -o 'jsonpath={.status.destination}' ++ sed 's|s3://||' ++ sed -e 's/.json$//' ++ sed 's|gs://||' ++ sed 's|azure://||' +++ mktemp ++ local LAST_OUT=/tmp/tmp.m8zyJ4VPvh +++ mktemp ++ local LAST_ERR=/tmp/tmp.pzdLf1PGLq ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get 
psmdb-backup backup-minio-2 -o 'jsonpath={.status.destination}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.m8zyJ4VPvh ++ cat /tmp/tmp.pzdLf1PGLq ++ rm /tmp/tmp.m8zyJ4VPvh /tmp/tmp.pzdLf1PGLq ++ return 0 + backup_dest=operator-testing/pitr-prefix-1/2025-12-04T20:40:15Z + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/pitr/conf/restore.yml + /usr/sbin/sed -e 's/name:/name: restore-backup-minio-2/' + /usr/sbin/sed -e /backupName/d + /usr/sbin/sed -e 's/clusterName:/clusterName: some-name/' + /usr/sbin/sed -e 's/pitrType:/type: date/' + '[' -z '2025-12-04 20:40:43' ']' + /usr/sbin/sed -e 's/date:/date: 2025-12-04 20:40:43/' + kubectl_bin apply -f - + /usr/sbin/sed -e 's|BACKUP-NAME|operator-testing/pitr-prefix-1/2025-12-04T20:40:15Z|' ++ mktemp + '[' -n '' ']' + yq + local LAST_OUT=/tmp/tmp.kzapFx7gPL ++ mktemp + local LAST_ERR=/tmp/tmp.iBGlxYE84j + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.kzapFx7gPL perconaservermongodbrestore.psmdb.percona.com/restore-backup-minio-2 created + cat /tmp/tmp.iBGlxYE84j + rm /tmp/tmp.kzapFx7gPL /tmp/tmp.iBGlxYE84j + return 0 + wait_restore backup-minio-2 some-name + local backup_name=backup-minio-2 + local cluster_name=some-name + local target_state=ready + local wait_cluster_consistency=1 + local wait_time=1780 + local ok_if_ready=0 + set +o xtrace Waiting for the psmdb-restore/restore-backup-minio-2 object to be created.OK Waiting psmdb-restore/restore-backup-minio-2 to reach state "ready" .OK after 0 minutes + [[ 1 -eq 1 ]] + wait_cluster_consistency some-name + local cluster_name=some-name + local wait_time=32 + retry=0 + sleep 7 + echo -n 'waiting for cluster readyness' waiting for cluster readyness++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.nuyY7bx33Q +++ mktemp ++ local LAST_ERR=/tmp/tmp.QQWG2WAxQb ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.nuyY7bx33Q ++ cat /tmp/tmp.QQWG2WAxQb ++ rm /tmp/tmp.nuyY7bx33Q /tmp/tmp.QQWG2WAxQb ++ return 0 + [[ ready == \r\e\a\d\y ]] + echo .OK .OK + echo + set -o xtrace + '[' -n '' ']' + compare_mongo_cmd find myApp:myPass@some-name-rs0-0.some-name-rs0.pitr-6260 '' + local command=find + local uri=myApp:myPass@some-name-rs0-0.some-name-rs0.pitr-6260 + local postfix= + local suffix= + local database=myApp + local collection=test + local sort= + local tls=false + local 'full_command=db.test.find()' + [[ -n '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-12-04T20:44:25+0000] running db.test.find() in myApp + [[ false == \t\r\u\e ]] + mongo_command=run_mongo + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-0.some-name-rs0.pitr-6260 mongodb '' + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-0.some-name-rs0.pitr-6260 + local driver=mongodb + local suffix=.svc.cluster.local + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods 
--selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp egrep: warning: egrep is obsolescent; using grep -E ++ local LAST_OUT=/tmp/tmp.QXNJ7lbvR1 +++ mktemp ++ local LAST_ERR=/tmp/tmp.mwZ5bGxj6c ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.QXNJ7lbvR1 ++ cat /tmp/tmp.mwZ5bGxj6c ++ rm /tmp/tmp.QXNJ7lbvR1 /tmp/tmp.mwZ5bGxj6c ++ return 0 + local client_container=psmdb-client-696897d69b-cg7fs + local mongo_flag= + [[ myApp:myPass@some-name-rs0-0.some-name-rs0.pitr-6260 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-696897d69b-cg7fs -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-0.some-name-rs0.pitr-6260.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.vWYQbsDRhP ++ mktemp + local LAST_ERR=/tmp/tmp.HhaKz07Y6z + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-cg7fs -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-0.some-name-rs0.pitr-6260.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.vWYQbsDRhP + cat /tmp/tmp.HhaKz07Y6z + rm /tmp/tmp.vWYQbsDRhP /tmp/tmp.HhaKz07Y6z + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/pitr/compare/find.json /tmp/tmp.gmxU6WaGGk/find + compare_mongo_cmd find myApp:myPass@some-name-rs0-1.some-name-rs0.pitr-6260 '' + local command=find + local uri=myApp:myPass@some-name-rs0-1.some-name-rs0.pitr-6260 + local postfix= + local suffix= + local database=myApp + local collection=test + local sort= + local tls=false + local 'full_command=db.test.find()' + [[ -n '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-12-04T20:44:27+0000] running db.test.find() in myApp + [[ false == \t\r\u\e ]] + mongo_command=run_mongo + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-1.some-name-rs0.pitr-6260 mongodb '' + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-1.some-name-rs0.pitr-6260 + local driver=mongodb + local suffix=.svc.cluster.local + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp egrep: warning: egrep is obsolescent; using grep -E ++ local LAST_OUT=/tmp/tmp.yZgpkfbE43 +++ mktemp ++ local LAST_ERR=/tmp/tmp.dfAx3ViU3Y ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.yZgpkfbE43 ++ cat /tmp/tmp.dfAx3ViU3Y ++ rm /tmp/tmp.yZgpkfbE43 /tmp/tmp.dfAx3ViU3Y ++ return 0 + local client_container=psmdb-client-696897d69b-cg7fs + local mongo_flag= + [[ myApp:myPass@some-name-rs0-1.some-name-rs0.pitr-6260 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec 
psmdb-client-696897d69b-cg7fs -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-1.some-name-rs0.pitr-6260.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.6CbIz6jDTz ++ mktemp + local LAST_ERR=/tmp/tmp.sSwolikCc6 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-cg7fs -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-1.some-name-rs0.pitr-6260.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.6CbIz6jDTz + cat /tmp/tmp.sSwolikCc6 + rm /tmp/tmp.6CbIz6jDTz /tmp/tmp.sSwolikCc6 + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/pitr/compare/find.json /tmp/tmp.gmxU6WaGGk/find + compare_mongo_cmd find myApp:myPass@some-name-rs0-2.some-name-rs0.pitr-6260 '' + local command=find + local uri=myApp:myPass@some-name-rs0-2.some-name-rs0.pitr-6260 + local postfix= + local suffix= + local database=myApp + local collection=test + local sort= + local tls=false + local 'full_command=db.test.find()' + [[ -n '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-12-04T20:44:31+0000] running db.test.find() in myApp + [[ false == \t\r\u\e ]] + mongo_command=run_mongo + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-2.some-name-rs0.pitr-6260 mongodb '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-2.some-name-rs0.pitr-6260 + local driver=mongodb + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' + local suffix=.svc.cluster.local + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp egrep: warning: egrep is obsolescent; using grep -E ++ local LAST_OUT=/tmp/tmp.MtEYBHrFoc +++ mktemp ++ local LAST_ERR=/tmp/tmp.IvRQI3jDdw ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.MtEYBHrFoc ++ cat /tmp/tmp.IvRQI3jDdw ++ rm /tmp/tmp.MtEYBHrFoc /tmp/tmp.IvRQI3jDdw ++ return 0 + local client_container=psmdb-client-696897d69b-cg7fs + local mongo_flag= + [[ myApp:myPass@some-name-rs0-2.some-name-rs0.pitr-6260 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-696897d69b-cg7fs -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-2.some-name-rs0.pitr-6260.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.6oElSpMQrM ++ mktemp + local LAST_ERR=/tmp/tmp.YI8Jp8nz37 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-cg7fs -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-2.some-name-rs0.pitr-6260.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.6oElSpMQrM + cat /tmp/tmp.YI8Jp8nz37 + rm /tmp/tmp.6oElSpMQrM /tmp/tmp.YI8Jp8nz37 + 
return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/pitr/compare/find.json /tmp/tmp.gmxU6WaGGk/find + run_backup backup-minio 3 + local name=backup-minio + local idx=3 + desc 'run backup backup-minio-3' + set +o xtrace ----------------------------------------------------------------------------------- run backup backup-minio-3 ----------------------------------------------------------------------------------- + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/pitr/conf/backup-minio.yml + kubectl_bin apply -f - + /usr/sbin/sed -e 's/name:/name: backup-minio-3/' ++ mktemp + local LAST_OUT=/tmp/tmp.KJcsXJVMwc ++ mktemp + local LAST_ERR=/tmp/tmp.VTaB3xq284 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.KJcsXJVMwc perconaservermongodbbackup.psmdb.percona.com/backup-minio-3 created + cat /tmp/tmp.VTaB3xq284 + rm /tmp/tmp.KJcsXJVMwc /tmp/tmp.VTaB3xq284 + return 0 + wait_backup backup-minio-3 + local backup_name=backup-minio-3 + local target_state=ready + set +o xtrace waiting for backup-minio-3 to reach ready state......OK + compare_latest_restorable_time some-name-rs0 backup-minio-3 + local cluster=some-name-rs0 + local backup_name=backup-minio-3 + local latest_restorable_time + local backup_time ++ get_latest_restorable_time some-name-rs0 ++ local cluster=some-name-rs0 ++ local first_timestamp ++ local second_timestamp ++ local retry=0 ++ [[ '' != '' ]] +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.Vvl2CcjDgh ++++ mktemp +++ local LAST_ERR=/tmp/tmp.R9DDvMt53k +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.Vvl2CcjDgh +++ cat /tmp/tmp.R9DDvMt53k +++ rm /tmp/tmp.Vvl2CcjDgh /tmp/tmp.R9DDvMt53k +++ return 0 ++ first_timestamp=1764881042 ++ sleep 5 ++ [[ 1764881042 != '' ]] ++ [[ 1764881042 != \n\u\l\l ]] +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.xoGpf3AL5V ++++ mktemp +++ local LAST_ERR=/tmp/tmp.qKp4xQVqey +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.xoGpf3AL5V +++ cat /tmp/tmp.qKp4xQVqey +++ rm /tmp/tmp.xoGpf3AL5V /tmp/tmp.qKp4xQVqey +++ return 0 ++ second_timestamp=1764881042 ++ let retry+=1 ++ [[ 1 -gt 30 ]] ++ [[ 1764881042 != '' ]] ++ [[ 1764881042 != \n\u\l\l ]] ++ [[ 1764881042 == 1764881042 ]] ++ /usr/sbin/date -u -d @1764881042 +%Y-%m-%dT%H:%M:%SZ + latest_restorable_time=2025-12-04T20:44:02Z ++ get_latest_restorable_time_from_backup_object backup-minio-3 ++ local backup_name=backup-minio-3 ++ local latestRestorableTime ++ local retry=0 ++ [[ '' != '' ]] ++ sleep 5 +++ kubectl_bin get psmdb-backup backup-minio-3 -o 'jsonpath={.status.latestRestorableTime}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.FFMqvek9xd ++++ mktemp +++ local LAST_ERR=/tmp/tmp.AHTb64exAM +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e 
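The compare_latest_restorable_time steps around this point boil down to polling PBM until the end timestamp of the newest PITR oplog chunk stops moving (two reads 5 seconds apart, retried up to 30 times), then checking that the operator published the same instant in the backup object's status.latestRestorableTime. Condensed into a standalone sketch, with the pod name, backup name, and jq path taken from this run:

# 1. read the end of the last PITR oplog chunk from pbm, twice, 5s apart
last_chunk_end() {
    kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json \
        | jq '.backups.pitrChunks.pitrChunks | last | .range.end'
}
first=$(last_chunk_end); sleep 5; second=$(last_chunk_end)
# 2. once the value is stable and non-null, render it as an ISO timestamp
if [ "$first" = "$second" ] && [ "$first" != "null" ]; then
    latest=$(date -u -d "@$first" +%Y-%m-%dT%H:%M:%SZ)   # 1764881042 -> 2025-12-04T20:44:02Z
    from_object=$(kubectl get psmdb-backup backup-minio-3 \
        -o 'jsonpath={.status.latestRestorableTime}')
    # 3. the test fails unless both sources agree
    [ "$latest" = "$from_object" ] || echo "mismatch: $latest vs $from_object" >&2
fi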
+++ kubectl get psmdb-backup backup-minio-3 -o 'jsonpath={.status.latestRestorableTime}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.FFMqvek9xd +++ cat /tmp/tmp.AHTb64exAM +++ rm /tmp/tmp.FFMqvek9xd /tmp/tmp.AHTb64exAM +++ return 0 ++ latestRestorableTime=2025-12-04T20:44:02Z ++ let retry+=1 ++ [[ 1 -gt 30 ]] ++ [[ 2025-12-04T20:44:02Z != '' ]] ++ [[ 2025-12-04T20:44:02Z != \n\u\l\l ]] ++ echo 2025-12-04T20:44:02Z + backup_time=2025-12-04T20:44:02Z + [[ 2025-12-04T20:44:02Z != \2\0\2\5\-\1\2\-\0\4\T\2\0\:\4\4\:\0\2\Z ]] + check_recovery backup-minio-3 latest '' -4th backupSource some-name + local backup_name=backup-minio-3 + local restore_type=latest + local restore_date= + local cmp_postfix=-4th + local backupSource=backupSource + local cluster_name=some-name + local selective_collection= + local restore_name=restore-backup-minio-3 + local restore_file=restore.yml + local cluster + cluster=some-name-rs0 + desc 'write more data before restore by latest' + set +o xtrace ----------------------------------------------------------------------------------- write more data before restore by latest ----------------------------------------------------------------------------------- + sleep 60 + run_mongo 'use myApp\n db.test.insert({ x: 100501 })' myApp:myPass@some-name-rs0.pitr-6260 + local 'command=use myApp\n db.test.insert({ x: 100501 })' + local uri=myApp:myPass@some-name-rs0.pitr-6260 + local driver=mongodb+srv + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.A6FgyzFStM +++ mktemp ++ local LAST_ERR=/tmp/tmp.T7YDI1EhZV ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.A6FgyzFStM ++ cat /tmp/tmp.T7YDI1EhZV ++ rm /tmp/tmp.A6FgyzFStM /tmp/tmp.T7YDI1EhZV ++ return 0 + local client_container=psmdb-client-696897d69b-cg7fs + local mongo_flag= + [[ myApp:myPass@some-name-rs0.pitr-6260 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-696897d69b-cg7fs -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100501 })\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.pitr-6260.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.N9Xij8xRTA ++ mktemp + local LAST_ERR=/tmp/tmp.tJjzRhDK7t + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-cg7fs -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100501 })\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.pitr-6260.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.N9Xij8xRTA Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-rs0-1.some-name-rs0.pitr-6260.svc.cluster.local:27017,some-name-rs0-0.some-name-rs0.pitr-6260.svc.cluster.local:27017,some-name-rs0-2.some-name-rs0.pitr-6260.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("1a236862-da75-458d-b91b-270d16b375d3") } Percona Server for MongoDB server version: v8.0.16-5 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.tJjzRhDK7t + rm /tmp/tmp.N9Xij8xRTA 
/tmp/tmp.tJjzRhDK7t + return 0 + '[' -n '' ']' + desc 'waiting for chunks to be uploaded' + set +o xtrace ----------------------------------------------------------------------------------- waiting for chunks to be uploaded ----------------------------------------------------------------------------------- + sleep 120 + desc 'check restore by latest' + set +o xtrace ----------------------------------------------------------------------------------- check restore by latest ----------------------------------------------------------------------------------- + '[' -z backupSource ']' ++ get_backup_dest backup-minio-3 ++ local backup_name=backup-minio-3 ++ sed -e 's/.json$//' ++ kubectl_bin get psmdb-backup backup-minio-3 -o 'jsonpath={.status.destination}' ++ sed 's|s3://||' ++ sed 's|azure://||' ++ sed 's|gs://||' +++ mktemp ++ local LAST_OUT=/tmp/tmp.PI2D8jVtdt +++ mktemp ++ local LAST_ERR=/tmp/tmp.IQfA0Y3BnU ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb-backup backup-minio-3 -o 'jsonpath={.status.destination}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.PI2D8jVtdt ++ cat /tmp/tmp.IQfA0Y3BnU ++ rm /tmp/tmp.PI2D8jVtdt /tmp/tmp.IQfA0Y3BnU ++ return 0 + backup_dest=operator-testing/pitr-prefix-1/2025-12-04T20:44:35Z + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/pitr/conf/restore.yml + /usr/sbin/sed -e 's/name:/name: restore-backup-minio-3/' + /usr/sbin/sed -e /backupName/d + /usr/sbin/sed -e 's/clusterName:/clusterName: some-name/' + /usr/sbin/sed -e 's/pitrType:/type: latest/' + '[' -z '' ']' + /usr/sbin/sed -e /date:/d + /usr/sbin/sed -e 's|BACKUP-NAME|operator-testing/pitr-prefix-1/2025-12-04T20:44:35Z|' + '[' -n '' ']' + yq + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.81XOQeRumv ++ mktemp + local LAST_ERR=/tmp/tmp.beTFC5f15c + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.81XOQeRumv perconaservermongodbrestore.psmdb.percona.com/restore-backup-minio-3 created + cat /tmp/tmp.beTFC5f15c + rm /tmp/tmp.81XOQeRumv /tmp/tmp.beTFC5f15c + return 0 + wait_restore backup-minio-3 some-name + local backup_name=backup-minio-3 + local cluster_name=some-name + local target_state=ready + local wait_cluster_consistency=1 + local wait_time=1780 + local ok_if_ready=0 + set +o xtrace Waiting for the psmdb-restore/restore-backup-minio-3 object to be created.OK Waiting psmdb-restore/restore-backup-minio-3 to reach state "ready" .OK after 0 minutes + [[ 1 -eq 1 ]] + wait_cluster_consistency some-name + local cluster_name=some-name + local wait_time=32 + retry=0 + sleep 7 + echo -n 'waiting for cluster readyness' waiting for cluster readyness++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.gpttFQL149 +++ mktemp ++ local LAST_ERR=/tmp/tmp.Y3MSQnFSdW ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.gpttFQL149 ++ cat /tmp/tmp.Y3MSQnFSdW ++ rm /tmp/tmp.gpttFQL149 /tmp/tmp.Y3MSQnFSdW ++ return 0 + [[ ready == \r\e\a\d\y ]] + echo .OK .OK + echo + set -o xtrace + '[' -n '' ']' + compare_mongo_cmd find myApp:myPass@some-name-rs0-0.some-name-rs0.pitr-6260 -4th + local command=find + local 
uri=myApp:myPass@some-name-rs0-0.some-name-rs0.pitr-6260 + local postfix=-4th + local suffix= + local database=myApp + local collection=test + local sort= + local tls=false + local 'full_command=db.test.find()' + [[ -n '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-12-04T20:48:27+0000] running db.test.find() in myApp + [[ false == \t\r\u\e ]] + mongo_command=run_mongo + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-0.some-name-rs0.pitr-6260 mongodb '' + local 'command=use myApp\n db.test.find()' + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' + local uri=myApp:myPass@some-name-rs0-0.some-name-rs0.pitr-6260 + local driver=mongodb + local suffix=.svc.cluster.local + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp egrep: warning: egrep is obsolescent; using grep -E ++ local LAST_OUT=/tmp/tmp.vRdjSexllQ +++ mktemp ++ local LAST_ERR=/tmp/tmp.iuq6rhpWgL ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.vRdjSexllQ ++ cat /tmp/tmp.iuq6rhpWgL ++ rm /tmp/tmp.vRdjSexllQ /tmp/tmp.iuq6rhpWgL ++ return 0 + local client_container=psmdb-client-696897d69b-cg7fs + local mongo_flag= + [[ myApp:myPass@some-name-rs0-0.some-name-rs0.pitr-6260 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-696897d69b-cg7fs -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-0.some-name-rs0.pitr-6260.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.f8pqInG7bN ++ mktemp + local LAST_ERR=/tmp/tmp.VKGhXNjxTY + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-cg7fs -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-0.some-name-rs0.pitr-6260.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.f8pqInG7bN + cat /tmp/tmp.VKGhXNjxTY + rm /tmp/tmp.f8pqInG7bN /tmp/tmp.VKGhXNjxTY + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/pitr/compare/find-4th.json /tmp/tmp.gmxU6WaGGk/find-4th + compare_mongo_cmd find myApp:myPass@some-name-rs0-1.some-name-rs0.pitr-6260 -4th + local command=find + local uri=myApp:myPass@some-name-rs0-1.some-name-rs0.pitr-6260 + local postfix=-4th + local suffix= + local database=myApp + local collection=test + local sort= + local tls=false + local 'full_command=db.test.find()' + [[ -n '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-12-04T20:48:30+0000] running db.test.find() in myApp + [[ false == \t\r\u\e ]] + mongo_command=run_mongo + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-1.some-name-rs0.pitr-6260 mongodb '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-1.some-name-rs0.pitr-6260 + local driver=mongodb + local suffix=.svc.cluster.local + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for 
MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' +++ mktemp egrep: warning: egrep is obsolescent; using grep -E ++ local LAST_OUT=/tmp/tmp.7fw14OKfGs +++ mktemp ++ local LAST_ERR=/tmp/tmp.9Mqhq8g1R6 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.7fw14OKfGs ++ cat /tmp/tmp.9Mqhq8g1R6 ++ rm /tmp/tmp.7fw14OKfGs /tmp/tmp.9Mqhq8g1R6 ++ return 0 + local client_container=psmdb-client-696897d69b-cg7fs + local mongo_flag= + [[ myApp:myPass@some-name-rs0-1.some-name-rs0.pitr-6260 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-696897d69b-cg7fs -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-1.some-name-rs0.pitr-6260.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.pKFww20oW8 ++ mktemp + local LAST_ERR=/tmp/tmp.rlediPaUI1 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-cg7fs -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-1.some-name-rs0.pitr-6260.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.pKFww20oW8 + cat /tmp/tmp.rlediPaUI1 + rm /tmp/tmp.pKFww20oW8 /tmp/tmp.rlediPaUI1 + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/pitr/compare/find-4th.json /tmp/tmp.gmxU6WaGGk/find-4th + compare_mongo_cmd find myApp:myPass@some-name-rs0-2.some-name-rs0.pitr-6260 -4th + local command=find + local uri=myApp:myPass@some-name-rs0-2.some-name-rs0.pitr-6260 + local postfix=-4th + local suffix= + local database=myApp + local collection=test + local sort= + local tls=false + local 'full_command=db.test.find()' + [[ -n '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-12-04T20:48:33+0000] running db.test.find() in myApp + [[ false == \t\r\u\e ]] + mongo_command=run_mongo + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-2.some-name-rs0.pitr-6260 mongodb '' + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-2.some-name-rs0.pitr-6260 + local driver=mongodb + local suffix=.svc.cluster.local + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp egrep: warning: egrep is obsolescent; using grep -E ++ local LAST_OUT=/tmp/tmp.9RdkeJ4FR5 +++ mktemp ++ local LAST_ERR=/tmp/tmp.4ls7ELLmqg ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.9RdkeJ4FR5 ++ 
cat /tmp/tmp.4ls7ELLmqg ++ rm /tmp/tmp.9RdkeJ4FR5 /tmp/tmp.4ls7ELLmqg ++ return 0 + local client_container=psmdb-client-696897d69b-cg7fs + local mongo_flag= + [[ myApp:myPass@some-name-rs0-2.some-name-rs0.pitr-6260 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-696897d69b-cg7fs -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-2.some-name-rs0.pitr-6260.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.eHxzkVQRxH ++ mktemp + local LAST_ERR=/tmp/tmp.svMVssuxFc + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-cg7fs -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-2.some-name-rs0.pitr-6260.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.eHxzkVQRxH + cat /tmp/tmp.svMVssuxFc + rm /tmp/tmp.eHxzkVQRxH /tmp/tmp.svMVssuxFc + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/pitr/compare/find-4th.json /tmp/tmp.gmxU6WaGGk/find-4th + desc 'restore pitr using backupSource on second cluster with another prefix in storage' + set +o xtrace ----------------------------------------------------------------------------------- restore pitr using backupSource on second cluster with another prefix in storage ----------------------------------------------------------------------------------- + second_cluster=cluster2-rs0 + apply_cluster /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/pitr/conf/cluster2-rs0.yml + '[' -z '' ']' + cat_config /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/pitr/conf/cluster2-rs0.yml + kubectl_bin apply -f - + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/pitr/conf/cluster2-rs0.yml ++ mktemp + yq eval '(.spec | select(.image == null)).image = "perconalab/percona-server-mongodb-operator:main-mongod8.0"' + yq eval '(.spec | select(has("pmm"))).pmm.image = "percona/pmm-client:2.44.1-1"' + yq eval '(.spec | select(has("initImage"))).initImage = "perconalab/percona-server-mongodb-operator:PR-1961-5b0cc669"' + yq eval '(.spec | select(has("backup"))).backup.image = "percona/percona-backup-mongodb:2.11.0"' + yq eval '.spec.upgradeOptions.apply="Never"' + local LAST_OUT=/tmp/tmp.m2flAJSANX ++ mktemp + local LAST_ERR=/tmp/tmp.mPezOpuxb8 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.m2flAJSANX perconaservermongodb.psmdb.percona.com/cluster2 created + cat /tmp/tmp.mPezOpuxb8 + rm /tmp/tmp.m2flAJSANX /tmp/tmp.mPezOpuxb8 + return 0 + desc 'check if all 3 Pods started' + set +o xtrace ----------------------------------------------------------------------------------- check if all 3 Pods started ----------------------------------------------------------------------------------- + wait_for_running cluster2-rs0 3 + local name=cluster2-rs0 + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=cluster2 ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod cluster2-rs0-0 + local pod=cluster2-rs0-0 + set +o xtrace waiting for pod/cluster2-rs0-0 to be ready......OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod cluster2-rs0-1 + local pod=cluster2-rs0-1 + set +o xtrace waiting 
for pod/cluster2-rs0-1 to be ready...OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb cluster2 -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.kJZy3uXd6i +++ mktemp ++ local LAST_ERR=/tmp/tmp.3pIvE1n4uk ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb cluster2 -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.kJZy3uXd6i ++ cat /tmp/tmp.3pIvE1n4uk ++ rm /tmp/tmp.kJZy3uXd6i /tmp/tmp.3pIvE1n4uk ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod cluster2-rs0-2 + local pod=cluster2-rs0-2 + set +o xtrace waiting for pod/cluster2-rs0-2 to be ready.....OK ++ kubectl_bin get psmdb cluster2 -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.iv3IYMaHYS +++ mktemp ++ local LAST_ERR=/tmp/tmp.GCcjtX9U6x ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb cluster2 -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.iv3IYMaHYS ++ cat /tmp/tmp.GCcjtX9U6x ++ rm /tmp/tmp.iv3IYMaHYS /tmp/tmp.GCcjtX9U6x ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb cluster2 -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.NpnDKtvejc +++ mktemp ++ local LAST_ERR=/tmp/tmp.mBi6aIIlEI ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb cluster2 -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.NpnDKtvejc ++ cat /tmp/tmp.mBi6aIIlEI ++ rm /tmp/tmp.NpnDKtvejc /tmp/tmp.mBi6aIIlEI ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness.. 
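The step that follows is the point of this scenario: check_recovery is invoked with backupSource, so the restore on cluster2 never references a psmdb-backup object. The manifest instead points straight at the S3 destination recorded for backup-minio-3 under the first cluster's storage prefix. The log only shows the sed pipeline over e2e-tests/pitr/conf/restore2.yml, not the resulting YAML, so the following is a hedged sketch of what the applied object plausibly looks like, with values from this run; the exact field layout is an assumption based on the PerconaServerMongoDBRestore CRD:

kubectl apply -f - <<'EOF'
apiVersion: psmdb.percona.com/v1
kind: PerconaServerMongoDBRestore
metadata:
  name: restore-backup-minio-3-second-cluster
spec:
  clusterName: cluster2    # sed: 's/clusterName:/clusterName: cluster2/'
  pitr:
    type: latest           # sed: 's/pitrType:/type: latest/'; the date: line is deleted
  backupSource:
    # sed: 's|BACKUP-NAME|operator-testing/pitr-prefix-1/2025-12-04T20:44:35Z|'
    destination: s3://operator-testing/pitr-prefix-1/2025-12-04T20:44:35Z
EOF

Restoring by destination rather than by backup name is what lets a second cluster consume another cluster's base backup and PITR chunks from the same bucket and prefix.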
+ wait_backup_agent cluster2-rs0-0 + local agent_pod=cluster2-rs0-0 + set +o xtrace waiting for pbm-agent to be ready in cluster2-rs0-0...2025-12-04T20:49:56.000+0000 I listening for the commands + wait_backup_agent cluster2-rs0-1 + local agent_pod=cluster2-rs0-1 + set +o xtrace waiting for pbm-agent to be ready in cluster2-rs0-1...2025-12-04T20:50:01.000+0000 I listening for the commands + wait_backup_agent cluster2-rs0-2 + local agent_pod=cluster2-rs0-2 + set +o xtrace waiting for pbm-agent to be ready in cluster2-rs0-2...2025-12-04T20:50:03.000+0000 I listening for the commands + desc 'create user myApp' + set +o xtrace ----------------------------------------------------------------------------------- create user myApp ----------------------------------------------------------------------------------- + run_mongo 'db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})' userAdmin:userAdmin123456@cluster2-rs0.pitr-6260 + local 'command=db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})' + local uri=userAdmin:userAdmin123456@cluster2-rs0.pitr-6260 + local driver=mongodb+srv + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.IiUMHmhZ98 +++ mktemp ++ local LAST_ERR=/tmp/tmp.QMpaGrich5 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.IiUMHmhZ98 ++ cat /tmp/tmp.QMpaGrich5 ++ rm /tmp/tmp.IiUMHmhZ98 /tmp/tmp.QMpaGrich5 ++ return 0 + local client_container=psmdb-client-696897d69b-cg7fs + local mongo_flag= + [[ userAdmin:userAdmin123456@cluster2-rs0.pitr-6260 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-696897d69b-cg7fs -- bash -c 'printf '\''db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})\n'\'' | mongo mongodb+srv://userAdmin:userAdmin123456@cluster2-rs0.pitr-6260.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.SMmDQlB4qt ++ mktemp + local LAST_ERR=/tmp/tmp.qdt8D05Jnt + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-cg7fs -- bash -c 'printf '\''db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})\n'\'' | mongo mongodb+srv://userAdmin:userAdmin123456@cluster2-rs0.pitr-6260.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.SMmDQlB4qt Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://cluster2-rs0-2.cluster2-rs0.pitr-6260.svc.cluster.local:27017,cluster2-rs0-1.cluster2-rs0.pitr-6260.svc.cluster.local:27017,cluster2-rs0-0.cluster2-rs0.pitr-6260.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("9793680b-b441-4fe4-b40a-7ff5bfd0c384") } Percona Server for MongoDB server version: v8.0.16-5 WARNING: shell and server versions do not match Successfully added user: { "user" : "myApp", "roles" : [ { "db" : "myApp", "role" : "readWrite" } ] } bye + cat /tmp/tmp.qdt8D05Jnt + rm /tmp/tmp.SMmDQlB4qt /tmp/tmp.qdt8D05Jnt + return 0 + sleep 2 + check_recovery backup-minio-3 latest '' -4th backupSource cluster2 '' backup-minio-3-second-cluster restore2.yml + local 
backup_name=backup-minio-3 + local restore_type=latest + local restore_date= + local cmp_postfix=-4th + local backupSource=backupSource + local cluster_name=cluster2 + local selective_collection= + local restore_name=restore-backup-minio-3-second-cluster + local restore_file=restore2.yml + local cluster + cluster=cluster2-rs0 + desc 'write more data before restore by latest' + set +o xtrace ----------------------------------------------------------------------------------- write more data before restore by latest ----------------------------------------------------------------------------------- + sleep 60 + run_mongo 'use myApp\n db.test.insert({ x: 100501 })' myApp:myPass@cluster2-rs0.pitr-6260 + local 'command=use myApp\n db.test.insert({ x: 100501 })' + local uri=myApp:myPass@cluster2-rs0.pitr-6260 + local driver=mongodb+srv + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Tf9RkStiEG +++ mktemp ++ local LAST_ERR=/tmp/tmp.0JfSpSXxMX ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Tf9RkStiEG ++ cat /tmp/tmp.0JfSpSXxMX ++ rm /tmp/tmp.Tf9RkStiEG /tmp/tmp.0JfSpSXxMX ++ return 0 + local client_container=psmdb-client-696897d69b-cg7fs + local mongo_flag= + [[ myApp:myPass@cluster2-rs0.pitr-6260 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-696897d69b-cg7fs -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100501 })\n'\'' | mongo mongodb+srv://myApp:myPass@cluster2-rs0.pitr-6260.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.XJVP2yk0Wm ++ mktemp + local LAST_ERR=/tmp/tmp.Mjw5YZmEF1 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-cg7fs -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100501 })\n'\'' | mongo mongodb+srv://myApp:myPass@cluster2-rs0.pitr-6260.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.XJVP2yk0Wm Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://cluster2-rs0-1.cluster2-rs0.pitr-6260.svc.cluster.local:27017,cluster2-rs0-0.cluster2-rs0.pitr-6260.svc.cluster.local:27017,cluster2-rs0-2.cluster2-rs0.pitr-6260.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("479aa52c-c97e-4312-9608-76f73d06ae71") } Percona Server for MongoDB server version: v8.0.16-5 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.Mjw5YZmEF1 + rm /tmp/tmp.XJVP2yk0Wm /tmp/tmp.Mjw5YZmEF1 + return 0 + '[' -n '' ']' + desc 'waiting for chunks to be uploaded' + set +o xtrace ----------------------------------------------------------------------------------- waiting for chunks to be uploaded ----------------------------------------------------------------------------------- + sleep 120 + desc 'check restore by latest' + set +o xtrace ----------------------------------------------------------------------------------- check restore by latest ----------------------------------------------------------------------------------- + '[' -z backupSource ']' ++ get_backup_dest backup-minio-3 ++ local 
backup_name=backup-minio-3 ++ kubectl_bin get psmdb-backup backup-minio-3 -o 'jsonpath={.status.destination}' ++ sed -e 's/.json$//' ++ sed 's|s3://||' +++ mktemp ++ sed 's|azure://||' ++ sed 's|gs://||' ++ local LAST_OUT=/tmp/tmp.YgmS1jq5k7 +++ mktemp ++ local LAST_ERR=/tmp/tmp.WRV88wiIkK ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb-backup backup-minio-3 -o 'jsonpath={.status.destination}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.YgmS1jq5k7 ++ cat /tmp/tmp.WRV88wiIkK ++ rm /tmp/tmp.YgmS1jq5k7 /tmp/tmp.WRV88wiIkK ++ return 0 + backup_dest=operator-testing/pitr-prefix-1/2025-12-04T20:44:35Z + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/pitr/conf/restore2.yml + /usr/sbin/sed -e 's/name:/name: restore-backup-minio-3-second-cluster/' + /usr/sbin/sed -e /backupName/d + '[' -z '' ']' + /usr/sbin/sed -e /date:/d + /usr/sbin/sed -e 's/clusterName:/clusterName: cluster2/' + /usr/sbin/sed -e 's/pitrType:/type: latest/' + '[' -n '' ']' + yq + /usr/sbin/sed -e 's|BACKUP-NAME|operator-testing/pitr-prefix-1/2025-12-04T20:44:35Z|' + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.SHNvYNpzMw ++ mktemp + local LAST_ERR=/tmp/tmp.Rw7inoTqft + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.SHNvYNpzMw perconaservermongodbrestore.psmdb.percona.com/restore-backup-minio-3-second-cluster created + cat /tmp/tmp.Rw7inoTqft + rm /tmp/tmp.SHNvYNpzMw /tmp/tmp.Rw7inoTqft + return 0 + wait_restore backup-minio-3-second-cluster cluster2 + local backup_name=backup-minio-3-second-cluster + local cluster_name=cluster2 + local target_state=ready + local wait_cluster_consistency=1 + local wait_time=1780 + local ok_if_ready=0 + set +o xtrace Waiting for the psmdb-restore/restore-backup-minio-3-second-cluster object to be created.OK Waiting psmdb-restore/restore-backup-minio-3-second-cluster to reach state "ready" .OK after 0 minutes + [[ 1 -eq 1 ]] + wait_cluster_consistency cluster2 + local cluster_name=cluster2 + local wait_time=32 + retry=0 + sleep 7 + echo -n 'waiting for cluster readyness' waiting for cluster readyness++ kubectl_bin get psmdb cluster2 -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.b46PR6FCFq +++ mktemp ++ local LAST_ERR=/tmp/tmp.q94hcelYuh ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb cluster2 -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.b46PR6FCFq ++ cat /tmp/tmp.q94hcelYuh ++ rm /tmp/tmp.b46PR6FCFq /tmp/tmp.q94hcelYuh ++ return 0 + [[ ready == \r\e\a\d\y ]] + echo .OK .OK + echo + set -o xtrace + '[' -n '' ']' + compare_mongo_cmd find myApp:myPass@cluster2-rs0-0.cluster2-rs0.pitr-6260 -4th + local command=find + local uri=myApp:myPass@cluster2-rs0-0.cluster2-rs0.pitr-6260 + local postfix=-4th + local suffix= + local database=myApp + local collection=test + local sort= + local tls=false + local 'full_command=db.test.find()' + [[ -n '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-12-04T20:53:45+0000] running db.test.find() in myApp + [[ false == \t\r\u\e ]] + mongo_command=run_mongo + run_mongo 'use myApp\n db.test.find()' myApp:myPass@cluster2-rs0-0.cluster2-rs0.pitr-6260 mongodb '' + local 'command=use myApp\n db.test.find()' + local 
uri=myApp:myPass@cluster2-rs0-0.cluster2-rs0.pitr-6260 + local driver=mongodb + local suffix=.svc.cluster.local + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp egrep: warning: egrep is obsolescent; using grep -E ++ local LAST_OUT=/tmp/tmp.GUzAMZOp0q +++ mktemp ++ local LAST_ERR=/tmp/tmp.FXJUqRDI0y ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.GUzAMZOp0q ++ cat /tmp/tmp.FXJUqRDI0y ++ rm /tmp/tmp.GUzAMZOp0q /tmp/tmp.FXJUqRDI0y ++ return 0 + local client_container=psmdb-client-696897d69b-cg7fs + local mongo_flag= + [[ myApp:myPass@cluster2-rs0-0.cluster2-rs0.pitr-6260 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-696897d69b-cg7fs -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@cluster2-rs0-0.cluster2-rs0.pitr-6260.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.ql75pwmshZ ++ mktemp + local LAST_ERR=/tmp/tmp.3up25yWwH4 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-cg7fs -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@cluster2-rs0-0.cluster2-rs0.pitr-6260.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.ql75pwmshZ + cat /tmp/tmp.3up25yWwH4 + rm /tmp/tmp.ql75pwmshZ /tmp/tmp.3up25yWwH4 + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/pitr/compare/find-4th.json /tmp/tmp.gmxU6WaGGk/find-4th + compare_mongo_cmd find myApp:myPass@cluster2-rs0-1.cluster2-rs0.pitr-6260 -4th + local command=find + local uri=myApp:myPass@cluster2-rs0-1.cluster2-rs0.pitr-6260 + local postfix=-4th + local suffix= + local database=myApp + local collection=test + local sort= + local tls=false + local 'full_command=db.test.find()' + [[ -n '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-12-04T20:53:47+0000] running db.test.find() in myApp + [[ false == \t\r\u\e ]] + mongo_command=run_mongo + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' + run_mongo 'use myApp\n db.test.find()' myApp:myPass@cluster2-rs0-1.cluster2-rs0.pitr-6260 mongodb '' + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@cluster2-rs0-1.cluster2-rs0.pitr-6260 + local driver=mongodb + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp egrep: warning: egrep is obsolescent; using grep -E ++ local LAST_OUT=/tmp/tmp.eKBidbjZGI +++ mktemp ++ local LAST_ERR=/tmp/tmp.d3D8VKFdqr ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e 
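Each post-restore verification here repeats one pipeline: exec into the psmdb-client pod, feed a two-line script to the mongo shell, filter the shell's connection noise, normalize ObjectIds and pod ordinals, and diff the result against a golden file. The same check as a single standalone command, with the pod, URI, and compare file from this run (paths shortened to the repo-relative form; grep -E is used directly, since the suite's egrep call is what triggers the obsolescence warnings seen throughout the log):

kubectl exec psmdb-client-696897d69b-cg7fs -- bash -c \
  'printf "use myApp\n db.test.find()\n" | mongo "mongodb://myApp:myPass@cluster2-rs0-0.cluster2-rs0.pitr-6260.svc.cluster.local/admin?ssl=false&replicaSet=rs0"' \
  | grep -Ev 'I NETWORK|W NETWORK|F NETWORK|Percona Server for MongoDB|connecting to:|Implicit session:|versions do not match' \
  | sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' \
  > /tmp/find-4th
# an empty diff means the -4th dataset survived the restore on this member
diff -u e2e-tests/pitr/compare/find-4th.json /tmp/find-4th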
++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.eKBidbjZGI ++ cat /tmp/tmp.d3D8VKFdqr ++ rm /tmp/tmp.eKBidbjZGI /tmp/tmp.d3D8VKFdqr ++ return 0 + local client_container=psmdb-client-696897d69b-cg7fs + local mongo_flag= + [[ myApp:myPass@cluster2-rs0-1.cluster2-rs0.pitr-6260 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-696897d69b-cg7fs -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@cluster2-rs0-1.cluster2-rs0.pitr-6260.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.kqRHj7jrQv ++ mktemp + local LAST_ERR=/tmp/tmp.Kfl6jA230L + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-cg7fs -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@cluster2-rs0-1.cluster2-rs0.pitr-6260.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.kqRHj7jrQv + cat /tmp/tmp.Kfl6jA230L + rm /tmp/tmp.kqRHj7jrQv /tmp/tmp.Kfl6jA230L + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/pitr/compare/find-4th.json /tmp/tmp.gmxU6WaGGk/find-4th + compare_mongo_cmd find myApp:myPass@cluster2-rs0-2.cluster2-rs0.pitr-6260 -4th + local command=find + local uri=myApp:myPass@cluster2-rs0-2.cluster2-rs0.pitr-6260 + local postfix=-4th + local suffix= + local database=myApp + local collection=test + local sort= + local tls=false + local 'full_command=db.test.find()' + [[ -n '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-12-04T20:53:49+0000] running db.test.find() in myApp + [[ false == \t\r\u\e ]] + mongo_command=run_mongo + run_mongo 'use myApp\n db.test.find()' myApp:myPass@cluster2-rs0-2.cluster2-rs0.pitr-6260 mongodb '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@cluster2-rs0-2.cluster2-rs0.pitr-6260 + local driver=mongodb + local suffix=.svc.cluster.local + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' egrep: warning: egrep is obsolescent; using grep -E +++ mktemp ++ local LAST_OUT=/tmp/tmp.EFiyYkJUVd +++ mktemp ++ local LAST_ERR=/tmp/tmp.tEuYO9Og2q ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.EFiyYkJUVd ++ cat /tmp/tmp.tEuYO9Og2q ++ rm /tmp/tmp.EFiyYkJUVd /tmp/tmp.tEuYO9Og2q ++ return 0 + local client_container=psmdb-client-696897d69b-cg7fs + local mongo_flag= + [[ myApp:myPass@cluster2-rs0-2.cluster2-rs0.pitr-6260 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-696897d69b-cg7fs -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@cluster2-rs0-2.cluster2-rs0.pitr-6260.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.Ub5qEPn7FP ++ mktemp + local LAST_ERR=/tmp/tmp.cL562I1LXh + local exit_status=0 + 
local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-cg7fs -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@cluster2-rs0-2.cluster2-rs0.pitr-6260.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Ub5qEPn7FP + cat /tmp/tmp.cL562I1LXh + rm /tmp/tmp.Ub5qEPn7FP /tmp/tmp.cL562I1LXh + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/pitr/compare/find-4th.json /tmp/tmp.gmxU6WaGGk/find-4th + backup_name_minio_2=backup-minio2 + run_backup backup-minio2 0 + local name=backup-minio2 + local idx=0 + desc 'run backup backup-minio2-0' + set +o xtrace ----------------------------------------------------------------------------------- run backup backup-minio2-0 ----------------------------------------------------------------------------------- + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/pitr/conf/backup-minio2.yml + /usr/sbin/sed -e 's/name:/name: backup-minio2-0/' + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.IP8Rgu4Nqk ++ mktemp + local LAST_ERR=/tmp/tmp.N2pgwo1iAe + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.IP8Rgu4Nqk perconaservermongodbbackup.psmdb.percona.com/backup-minio2-0 created + cat /tmp/tmp.N2pgwo1iAe + rm /tmp/tmp.IP8Rgu4Nqk /tmp/tmp.N2pgwo1iAe + return 0 + wait_backup backup-minio2-0 + local backup_name=backup-minio2-0 + local target_state=ready + set +o xtrace waiting for backup-minio2-0 to reach ready state......OK ++ run_mongo 'new Date().toISOString()' myApp:myPass@cluster2-rs0.pitr-6260 mongodb '' --quiet ++ local 'command=new Date().toISOString()' ++ local uri=myApp:myPass@cluster2-rs0.pitr-6260 ++ local driver=mongodb ++ local suffix=.svc.cluster.local ++ grep -E -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match' ++ cut -c1-19 +++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ tr T ' ' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.dvviflDwUG ++++ mktemp +++ local LAST_ERR=/tmp/tmp.82ctj5tq1S +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.dvviflDwUG +++ cat /tmp/tmp.82ctj5tq1S +++ rm /tmp/tmp.dvviflDwUG /tmp/tmp.82ctj5tq1S +++ return 0 ++ local client_container=psmdb-client-696897d69b-cg7fs ++ local mongo_flag=--quiet ++ [[ myApp:myPass@cluster2-rs0.pitr-6260 == *cfg* ]] ++ replica_set=rs0 ++ kubectl_bin exec psmdb-client-696897d69b-cg7fs -- bash -c 'printf '\''new Date().toISOString()\n'\'' | mongo mongodb://myApp:myPass@cluster2-rs0.pitr-6260.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 --quiet' +++ mktemp ++ local LAST_OUT=/tmp/tmp.MEjh47XECV +++ mktemp ++ local LAST_ERR=/tmp/tmp.krwDIJbNfd ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl exec psmdb-client-696897d69b-cg7fs -- bash -c 'printf '\''new Date().toISOString()\n'\'' | mongo mongodb://myApp:myPass@cluster2-rs0.pitr-6260.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 --quiet' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' 
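Note: the nested ++/+++ trace here is capturing the cluster's current time to use as the point-in-time restore target: new Date().toISOString() is piped through mongo --quiet, the connection chatter is filtered out, tr swaps the ISO 'T' for a space, and cut -c1-19 trims sub-second precision. A rough reconstruction (pod name and URI taken from this run; the suite's own run_mongo helper does the exec):

    # Illustrative reconstruction of the time_now capture seen in the trace.
    time_now=$(
      kubectl exec psmdb-client-696897d69b-cg7fs -- bash -c \
        'printf "new Date().toISOString()\n" | mongo "mongodb://myApp:myPass@cluster2-rs0.pitr-6260.svc.cluster.local/admin?ssl=false&replicaSet=rs0" --quiet' \
      | grep -E -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match' \
      | tr T ' ' \
      | cut -c1-19     # "2025-12-04T20:54:10.123Z" -> "2025-12-04 20:54:10"
    )

The resulting value is exactly what later appears as check_recovery backup-minio2-0 date '2025-12-04 20:54:10' -4th.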
++ break ++ cat /tmp/tmp.MEjh47XECV ++ cat /tmp/tmp.krwDIJbNfd ++ rm /tmp/tmp.MEjh47XECV /tmp/tmp.krwDIJbNfd ++ return 0 + time_now='2025-12-04 20:54:10' + write_document -5th cluster2-rs0 + local cmp_postfix=-5th + local cluster_name=cluster2-rs0 + desc 'write initial data, read from all' + set +o xtrace ----------------------------------------------------------------------------------- write initial data, read from all ----------------------------------------------------------------------------------- + run_mongo 'use myApp\n db.test.insert({ x: 100500 })' myApp:myPass@cluster2-rs0.pitr-6260 + local 'command=use myApp\n db.test.insert({ x: 100500 })' + local uri=myApp:myPass@cluster2-rs0.pitr-6260 + local driver=mongodb+srv + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.gfX0xvvzlf +++ mktemp ++ local LAST_ERR=/tmp/tmp.vxz64IJWzF ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.gfX0xvvzlf ++ cat /tmp/tmp.vxz64IJWzF ++ rm /tmp/tmp.gfX0xvvzlf /tmp/tmp.vxz64IJWzF ++ return 0 + local client_container=psmdb-client-696897d69b-cg7fs + local mongo_flag= + [[ myApp:myPass@cluster2-rs0.pitr-6260 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-696897d69b-cg7fs -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb+srv://myApp:myPass@cluster2-rs0.pitr-6260.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.ththuKeXYD ++ mktemp + local LAST_ERR=/tmp/tmp.uSVpCJqJ8F + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-cg7fs -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb+srv://myApp:myPass@cluster2-rs0.pitr-6260.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.ththuKeXYD Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://cluster2-rs0-2.cluster2-rs0.pitr-6260.svc.cluster.local:27017,cluster2-rs0-1.cluster2-rs0.pitr-6260.svc.cluster.local:27017,cluster2-rs0-0.cluster2-rs0.pitr-6260.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("29da49e1-3546-4c57-83d9-778d122fe4d4") } Percona Server for MongoDB server version: v8.0.16-5 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.uSVpCJqJ8F + rm /tmp/tmp.ththuKeXYD /tmp/tmp.uSVpCJqJ8F + return 0 + minikube_sleep + sleep_time=10 + [[ '' == 1 ]] + compare_mongo_cmd find myApp:myPass@cluster2-rs0-0.cluster2-rs0.pitr-6260 -5th + local command=find + local uri=myApp:myPass@cluster2-rs0-0.cluster2-rs0.pitr-6260 + local postfix=-5th + local suffix= + local database=myApp + local collection=test + local sort= + local tls=false + local 'full_command=db.test.find()' + [[ -n '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-12-04T20:54:14+0000] running db.test.find() in myApp + [[ false == \t\r\u\e ]] + mongo_command=run_mongo + run_mongo 'use myApp\n db.test.find()' myApp:myPass@cluster2-rs0-0.cluster2-rs0.pitr-6260 mongodb '' + local 'command=use myApp\n db.test.find()' + local 
uri=myApp:myPass@cluster2-rs0-0.cluster2-rs0.pitr-6260 + local driver=mongodb + local suffix=.svc.cluster.local + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ifV5hLZLkq +++ mktemp egrep: warning: egrep is obsolescent; using grep -E ++ local LAST_ERR=/tmp/tmp.GkkBDO0G09 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.ifV5hLZLkq ++ cat /tmp/tmp.GkkBDO0G09 ++ rm /tmp/tmp.ifV5hLZLkq /tmp/tmp.GkkBDO0G09 ++ return 0 + local client_container=psmdb-client-696897d69b-cg7fs + local mongo_flag= + [[ myApp:myPass@cluster2-rs0-0.cluster2-rs0.pitr-6260 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-696897d69b-cg7fs -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@cluster2-rs0-0.cluster2-rs0.pitr-6260.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.11JOBQXr0E ++ mktemp + local LAST_ERR=/tmp/tmp.KnHy6W1R6U + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-cg7fs -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@cluster2-rs0-0.cluster2-rs0.pitr-6260.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.11JOBQXr0E + cat /tmp/tmp.KnHy6W1R6U + rm /tmp/tmp.11JOBQXr0E /tmp/tmp.KnHy6W1R6U + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/pitr/compare/find-5th.json /tmp/tmp.gmxU6WaGGk/find-5th + compare_mongo_cmd find myApp:myPass@cluster2-rs0-1.cluster2-rs0.pitr-6260 -5th + local command=find + local uri=myApp:myPass@cluster2-rs0-1.cluster2-rs0.pitr-6260 + local postfix=-5th + local suffix= + local database=myApp + local collection=test + local sort= + local tls=false + local 'full_command=db.test.find()' + [[ -n '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-12-04T20:54:16+0000] running db.test.find() in myApp + [[ false == \t\r\u\e ]] + mongo_command=run_mongo + run_mongo 'use myApp\n db.test.find()' myApp:myPass@cluster2-rs0-1.cluster2-rs0.pitr-6260 mongodb '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@cluster2-rs0-1.cluster2-rs0.pitr-6260 + local driver=mongodb + local suffix=.svc.cluster.local + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp egrep: warning: egrep is obsolescent; using grep -E ++ local LAST_OUT=/tmp/tmp.a8jwUzQQ0G +++ mktemp ++ local LAST_ERR=/tmp/tmp.QSPXgAkUTI ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e 
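Note: each compare_mongo_cmd step normalizes the mongo shell output before diffing it against a checked-in golden file, so the comparison is stable across runs: the grep -E -v filter drops connection chatter, and sed blanks ObjectId("...") values and rewrites the pod ordinal in hostnames to xxx. In outline (paths from this job; helper internals may differ):

    run_mongo 'use myApp\n db.test.find()' "$uri" mongodb '' \
      | grep -E -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Started a new thread for the timer service' \
      | sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' \
      > /tmp/tmp.gmxU6WaGGk/find-5th
    diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/pitr/compare/find-5th.json /tmp/tmp.gmxU6WaGGk/find-5th

An empty diff is what "passing" looks like here, which is why nothing appears between each diff -u line and the next compare step.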
++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.a8jwUzQQ0G ++ cat /tmp/tmp.QSPXgAkUTI ++ rm /tmp/tmp.a8jwUzQQ0G /tmp/tmp.QSPXgAkUTI ++ return 0 + local client_container=psmdb-client-696897d69b-cg7fs + local mongo_flag= + [[ myApp:myPass@cluster2-rs0-1.cluster2-rs0.pitr-6260 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-696897d69b-cg7fs -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@cluster2-rs0-1.cluster2-rs0.pitr-6260.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.MtxXCZxbsh ++ mktemp + local LAST_ERR=/tmp/tmp.4w5DY29DqL + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-cg7fs -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@cluster2-rs0-1.cluster2-rs0.pitr-6260.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.MtxXCZxbsh + cat /tmp/tmp.4w5DY29DqL + rm /tmp/tmp.MtxXCZxbsh /tmp/tmp.4w5DY29DqL + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/pitr/compare/find-5th.json /tmp/tmp.gmxU6WaGGk/find-5th + compare_mongo_cmd find myApp:myPass@cluster2-rs0-2.cluster2-rs0.pitr-6260 -5th + local command=find + local uri=myApp:myPass@cluster2-rs0-2.cluster2-rs0.pitr-6260 + local postfix=-5th + local suffix= + local database=myApp + local collection=test + local sort= + local tls=false + local 'full_command=db.test.find()' + [[ -n '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-12-04T20:54:19+0000] running db.test.find() in myApp + [[ false == \t\r\u\e ]] + mongo_command=run_mongo + run_mongo 'use myApp\n db.test.find()' myApp:myPass@cluster2-rs0-2.cluster2-rs0.pitr-6260 mongodb '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@cluster2-rs0-2.cluster2-rs0.pitr-6260 + local driver=mongodb + local suffix=.svc.cluster.local + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp egrep: warning: egrep is obsolescent; using grep -E ++ local LAST_OUT=/tmp/tmp.c4FNUBh4FZ +++ mktemp ++ local LAST_ERR=/tmp/tmp.QMiCuf3BhO ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.c4FNUBh4FZ ++ cat /tmp/tmp.QMiCuf3BhO ++ rm /tmp/tmp.c4FNUBh4FZ /tmp/tmp.QMiCuf3BhO ++ return 0 + local client_container=psmdb-client-696897d69b-cg7fs + local mongo_flag= + [[ myApp:myPass@cluster2-rs0-2.cluster2-rs0.pitr-6260 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-696897d69b-cg7fs -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@cluster2-rs0-2.cluster2-rs0.pitr-6260.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.0VcUYhcDty ++ mktemp + local LAST_ERR=/tmp/tmp.Z10lih0VM1 + local exit_status=0 + 
local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-cg7fs -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@cluster2-rs0-2.cluster2-rs0.pitr-6260.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.0VcUYhcDty + cat /tmp/tmp.Z10lih0VM1 + rm /tmp/tmp.0VcUYhcDty /tmp/tmp.Z10lih0VM1 + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/pitr/compare/find-5th.json /tmp/tmp.gmxU6WaGGk/find-5th + sleep 2 + check_recovery backup-minio2-0 date '2025-12-04 20:54:10' -4th '' cluster2 + local backup_name=backup-minio2-0 + local restore_type=date + local 'restore_date=2025-12-04 20:54:10' + local cmp_postfix=-4th + local backupSource= + local cluster_name=cluster2 + local selective_collection= + local restore_name=restore-backup-minio2-0 + local restore_file=restore.yml + local cluster + cluster=cluster2-rs0 + desc 'write more data before restore by date' + set +o xtrace ----------------------------------------------------------------------------------- write more data before restore by date ----------------------------------------------------------------------------------- + sleep 60 + run_mongo 'use myApp\n db.test.insert({ x: 100501 })' myApp:myPass@cluster2-rs0.pitr-6260 + local 'command=use myApp\n db.test.insert({ x: 100501 })' + local uri=myApp:myPass@cluster2-rs0.pitr-6260 + local driver=mongodb+srv + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.aodSKhPuNN +++ mktemp ++ local LAST_ERR=/tmp/tmp.7rsivQzlxz ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.aodSKhPuNN ++ cat /tmp/tmp.7rsivQzlxz ++ rm /tmp/tmp.aodSKhPuNN /tmp/tmp.7rsivQzlxz ++ return 0 + local client_container=psmdb-client-696897d69b-cg7fs + local mongo_flag= + [[ myApp:myPass@cluster2-rs0.pitr-6260 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-696897d69b-cg7fs -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100501 })\n'\'' | mongo mongodb+srv://myApp:myPass@cluster2-rs0.pitr-6260.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.E6QM8m6em2 ++ mktemp + local LAST_ERR=/tmp/tmp.wDOGgnMjcc + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-cg7fs -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100501 })\n'\'' | mongo mongodb+srv://myApp:myPass@cluster2-rs0.pitr-6260.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.E6QM8m6em2 Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://cluster2-rs0-2.cluster2-rs0.pitr-6260.svc.cluster.local:27017,cluster2-rs0-1.cluster2-rs0.pitr-6260.svc.cluster.local:27017,cluster2-rs0-0.cluster2-rs0.pitr-6260.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("d7db4093-e94e-4eed-978e-82cfde097d2e") } Percona Server for MongoDB server version: v8.0.16-5 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat 
/tmp/tmp.wDOGgnMjcc + rm /tmp/tmp.E6QM8m6em2 /tmp/tmp.wDOGgnMjcc + return 0 + '[' -n '' ']' + desc 'waiting for chunks to be uploaded' + set +o xtrace ----------------------------------------------------------------------------------- waiting for chunks to be uploaded ----------------------------------------------------------------------------------- + sleep 120 + desc 'check restore by date' + set +o xtrace ----------------------------------------------------------------------------------- check restore by date ----------------------------------------------------------------------------------- + '[' -z '' ']' + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/pitr/conf/restore.yml + /usr/sbin/sed -e 's/name:/name: restore-backup-minio2-0/' + /usr/sbin/sed -e 's/backupName:/backupName: backup-minio2-0/' + /usr/sbin/sed -e 's/pitrType:/type: date/' + /usr/sbin/sed -e 's/clusterName:/clusterName: cluster2/' + kubectl_bin apply -f - + '[' -n '' ']' + yq + '[' -z '2025-12-04 20:54:10' ']' ++ mktemp + /usr/sbin/sed -e 's/date:/date: 2025-12-04 20:54:10/' + /usr/sbin/sed -e /backupSource/,+2d + local LAST_OUT=/tmp/tmp.DHQPOuWU3L ++ mktemp + local LAST_ERR=/tmp/tmp.WlfUkw1lu4 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.DHQPOuWU3L perconaservermongodbrestore.psmdb.percona.com/restore-backup-minio2-0 created + cat /tmp/tmp.WlfUkw1lu4 + rm /tmp/tmp.DHQPOuWU3L /tmp/tmp.WlfUkw1lu4 + return 0 + wait_restore backup-minio2-0 cluster2 + local backup_name=backup-minio2-0 + local cluster_name=cluster2 + local target_state=ready + local wait_cluster_consistency=1 + local wait_time=1780 + local ok_if_ready=0 + set +o xtrace Waiting for the psmdb-restore/restore-backup-minio2-0 object to be created.OK Waiting psmdb-restore/restore-backup-minio2-0 to reach state "ready" .OK after 0 minutes + [[ 1 -eq 1 ]] + wait_cluster_consistency cluster2 + local cluster_name=cluster2 + local wait_time=32 + retry=0 + sleep 7 + echo -n 'waiting for cluster readyness' waiting for cluster readyness++ kubectl_bin get psmdb cluster2 -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.5w1e1QX7lu +++ mktemp ++ local LAST_ERR=/tmp/tmp.uCKKZlUd1o ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb cluster2 -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.5w1e1QX7lu ++ cat /tmp/tmp.uCKKZlUd1o ++ rm /tmp/tmp.5w1e1QX7lu /tmp/tmp.uCKKZlUd1o ++ return 0 + [[ ready == \r\e\a\d\y ]] + echo .OK .OK + echo + set -o xtrace + '[' -n '' ']' + compare_mongo_cmd find myApp:myPass@cluster2-rs0-0.cluster2-rs0.pitr-6260 -4th + local command=find + local uri=myApp:myPass@cluster2-rs0-0.cluster2-rs0.pitr-6260 + local postfix=-4th + local suffix= + local database=myApp + local collection=test + local sort= + local tls=false + local 'full_command=db.test.find()' + [[ -n '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-12-04T20:57:55+0000] running db.test.find() in myApp + [[ false == \t\r\u\e ]] + mongo_command=run_mongo + run_mongo 'use myApp\n db.test.find()' myApp:myPass@cluster2-rs0-0.cluster2-rs0.pitr-6260 mongodb '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@cluster2-rs0-0.cluster2-rs0.pitr-6260 + local driver=mongodb + local suffix=.svc.cluster.local + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona 
Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp egrep: warning: egrep is obsolescent; using grep -E ++ local LAST_OUT=/tmp/tmp.9jLyNiLONJ +++ mktemp ++ local LAST_ERR=/tmp/tmp.alFiOpumW2 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.9jLyNiLONJ ++ cat /tmp/tmp.alFiOpumW2 ++ rm /tmp/tmp.9jLyNiLONJ /tmp/tmp.alFiOpumW2 ++ return 0 + local client_container=psmdb-client-696897d69b-cg7fs + local mongo_flag= + [[ myApp:myPass@cluster2-rs0-0.cluster2-rs0.pitr-6260 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-696897d69b-cg7fs -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@cluster2-rs0-0.cluster2-rs0.pitr-6260.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.0cgk4oaOm6 ++ mktemp + local LAST_ERR=/tmp/tmp.jAc7abwW67 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-cg7fs -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@cluster2-rs0-0.cluster2-rs0.pitr-6260.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.0cgk4oaOm6 + cat /tmp/tmp.jAc7abwW67 + rm /tmp/tmp.0cgk4oaOm6 /tmp/tmp.jAc7abwW67 + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/pitr/compare/find-4th.json /tmp/tmp.gmxU6WaGGk/find-4th + compare_mongo_cmd find myApp:myPass@cluster2-rs0-1.cluster2-rs0.pitr-6260 -4th + local command=find + local uri=myApp:myPass@cluster2-rs0-1.cluster2-rs0.pitr-6260 + local postfix=-4th + local suffix= + local database=myApp + local collection=test + local sort= + local tls=false + local 'full_command=db.test.find()' + [[ -n '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-12-04T20:57:57+0000] running db.test.find() in myApp + [[ false == \t\r\u\e ]] + mongo_command=run_mongo + run_mongo 'use myApp\n db.test.find()' myApp:myPass@cluster2-rs0-1.cluster2-rs0.pitr-6260 mongodb '' + local 'command=use myApp\n db.test.find()' + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' + local uri=myApp:myPass@cluster2-rs0-1.cluster2-rs0.pitr-6260 + local driver=mongodb + local suffix=.svc.cluster.local + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp egrep: warning: egrep is obsolescent; using grep -E ++ local LAST_OUT=/tmp/tmp.ROPy2OyutQ +++ mktemp ++ local LAST_ERR=/tmp/tmp.jzu6qIffnm ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.ROPy2OyutQ ++ cat 
/tmp/tmp.jzu6qIffnm ++ rm /tmp/tmp.ROPy2OyutQ /tmp/tmp.jzu6qIffnm ++ return 0 + local client_container=psmdb-client-696897d69b-cg7fs + local mongo_flag= + [[ myApp:myPass@cluster2-rs0-1.cluster2-rs0.pitr-6260 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-696897d69b-cg7fs -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@cluster2-rs0-1.cluster2-rs0.pitr-6260.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.UzNdy2xSzk ++ mktemp + local LAST_ERR=/tmp/tmp.TmyCiw0ILk + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-cg7fs -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@cluster2-rs0-1.cluster2-rs0.pitr-6260.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.UzNdy2xSzk + cat /tmp/tmp.TmyCiw0ILk + rm /tmp/tmp.UzNdy2xSzk /tmp/tmp.TmyCiw0ILk + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/pitr/compare/find-4th.json /tmp/tmp.gmxU6WaGGk/find-4th + compare_mongo_cmd find myApp:myPass@cluster2-rs0-2.cluster2-rs0.pitr-6260 -4th + local command=find + local uri=myApp:myPass@cluster2-rs0-2.cluster2-rs0.pitr-6260 + local postfix=-4th + local suffix= + local database=myApp + local collection=test + local sort= + local tls=false + local 'full_command=db.test.find()' + [[ -n '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-12-04T20:57:59+0000] running db.test.find() in myApp + [[ false == \t\r\u\e ]] + mongo_command=run_mongo + run_mongo 'use myApp\n db.test.find()' myApp:myPass@cluster2-rs0-2.cluster2-rs0.pitr-6260 mongodb '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@cluster2-rs0-2.cluster2-rs0.pitr-6260 + local driver=mongodb + local suffix=.svc.cluster.local + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ogvZT8f8WM egrep: warning: egrep is obsolescent; using grep -E +++ mktemp ++ local LAST_ERR=/tmp/tmp.yeFFzkO3pj ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.ogvZT8f8WM ++ cat /tmp/tmp.yeFFzkO3pj ++ rm /tmp/tmp.ogvZT8f8WM /tmp/tmp.yeFFzkO3pj ++ return 0 + local client_container=psmdb-client-696897d69b-cg7fs + local mongo_flag= + [[ myApp:myPass@cluster2-rs0-2.cluster2-rs0.pitr-6260 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-696897d69b-cg7fs -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@cluster2-rs0-2.cluster2-rs0.pitr-6260.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.VJLeXc4jmH ++ mktemp + local LAST_ERR=/tmp/tmp.dxCvwhecNw + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-cg7fs -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo 
mongodb://myApp:myPass@cluster2-rs0-2.cluster2-rs0.pitr-6260.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.VJLeXc4jmH + cat /tmp/tmp.dxCvwhecNw + rm /tmp/tmp.VJLeXc4jmH /tmp/tmp.dxCvwhecNw + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/pitr/compare/find-4th.json /tmp/tmp.gmxU6WaGGk/find-4th + kubectl patch psmdb some-name --type=merge --patch '{"spec": {"backup": {"pitr": {"enabled": false}}}}' perconaservermongodb.psmdb.percona.com/some-name patched + kubectl patch psmdb cluster2 --type=merge --patch '{"spec": {"backup": {"pitr": {"enabled": false}}}}' perconaservermongodb.psmdb.percona.com/cluster2 patched + sleep 20 + kubectl_bin delete psmdb-backup --all ++ mktemp + local LAST_OUT=/tmp/tmp.bJ0spAoxaC ++ mktemp + local LAST_ERR=/tmp/tmp.quKhOCxbhN + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete psmdb-backup --all + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.bJ0spAoxaC perconaservermongodbbackup.psmdb.percona.com "backup-minio-0" deleted from pitr-6260 namespace perconaservermongodbbackup.psmdb.percona.com "backup-minio-1" deleted from pitr-6260 namespace perconaservermongodbbackup.psmdb.percona.com "backup-minio-2" deleted from pitr-6260 namespace perconaservermongodbbackup.psmdb.percona.com "backup-minio-3" deleted from pitr-6260 namespace perconaservermongodbbackup.psmdb.percona.com "backup-minio2-0" deleted from pitr-6260 namespace + cat /tmp/tmp.quKhOCxbhN + rm /tmp/tmp.bJ0spAoxaC /tmp/tmp.quKhOCxbhN + return 0 + destroy pitr-6260 + local namespace=pitr-6260 + local ignore_logs=true + [[ 0 == 1 ]] + desc 'destroy cluster/operator and all other resources' + set +o xtrace ----------------------------------------------------------------------------------- destroy cluster/operator and all other resources ----------------------------------------------------------------------------------- + '[' true == false ']' + delete_backups + desc 'Delete psmdb-backup' + set +o xtrace ----------------------------------------------------------------------------------- Delete psmdb-backup ----------------------------------------------------------------------------------- ++ kubectl_bin get psmdb-backup --no-headers ++ wc -l +++ mktemp ++ local LAST_OUT=/tmp/tmp.k30V2kobKE +++ mktemp ++ local LAST_ERR=/tmp/tmp.LSIi2NJxjE ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb-backup --no-headers ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.k30V2kobKE ++ cat /tmp/tmp.LSIi2NJxjE No resources found in pitr-6260 namespace. 
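Note: the delete_crd sequence that follows is the suite's standard teardown: delete the CRDs with --wait=false, then strip finalizers from any custom resources that still exist (otherwise CRD deletion can hang indefinitely), then kubectl wait --for=delete. The "stray \ before -" warnings come from grep -v '\-\-\-'; the backslashes are unnecessary inside single quotes, and grep -v -- '---' is the cleaner spelling. A condensed sketch of the pattern:

    kubectl delete -f "${src_dir}/deploy/crd.yaml" --ignore-not-found --wait=false
    for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v -- '---'); do
        # clear finalizers on leftover CRs; each output line is "NAMESPACE NAME",
        # which xargs hands to sh as $0 and $1
        kubectl get "$crd_name" --all-namespaces -o wide | grep -v NAMESPACE \
          | xargs -L 1 sh -xc "kubectl patch $crd_name -n \$0 \$1 --type=merge -p '{\"metadata\":{\"finalizers\":[]}}'" \
          || true     # the resource type may already be gone, as it is in this run
        kubectl wait --for=delete crd "$crd_name"
    done

In this run the CRDs are already gone by the time the patch runs, so every patch fails with "the server doesn't have a resource type" and the || no-op (the bare + : in the trace) swallows the error.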
++ rm /tmp/tmp.k30V2kobKE /tmp/tmp.LSIi2NJxjE ++ return 0 + '[' 0 '!=' 0 ']' + delete_crd + desc 'get and delete old CRDs and RBAC' + set +o xtrace ----------------------------------------------------------------------------------- get and delete old CRDs and RBAC ----------------------------------------------------------------------------------- + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/deploy/crd.yaml --ignore-not-found --wait=false ++ mktemp + local LAST_OUT=/tmp/tmp.UdAR3IY1j5 ++ mktemp + local LAST_ERR=/tmp/tmp.u9C7G4Et8r + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/deploy/crd.yaml --ignore-not-found --wait=false + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.UdAR3IY1j5 customresourcedefinition.apiextensions.k8s.io "perconaservermongodbbackups.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbrestores.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbs.psmdb.percona.com" deleted + cat /tmp/tmp.u9C7G4Et8r + rm /tmp/tmp.UdAR3IY1j5 /tmp/tmp.u9C7G4Et8r + return 0 ++ yq eval .metadata.name /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/deploy/crd.yaml ++ grep -v '\-\-\-' grep: warning: stray \ before - grep: warning: stray \ before - + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + kubectl get perconaservermongodbbackups.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbbackups.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbbackups" + kubectl patch perconaservermongodbbackups.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbbackups" + : + kubectl_bin wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.NOqXg2aO6W ++ mktemp + local LAST_ERR=/tmp/tmp.4G80G2I5Wl + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.NOqXg2aO6W + cat /tmp/tmp.4G80G2I5Wl + rm /tmp/tmp.NOqXg2aO6W /tmp/tmp.4G80G2I5Wl + return 0 + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + kubectl get perconaservermongodbrestores.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbrestores.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbrestores" + kubectl patch perconaservermongodbrestores.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbrestores" + : + kubectl_bin wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.E36sdflTH0 ++ mktemp + local LAST_ERR=/tmp/tmp.SULAMV45Ey + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + 
break + cat /tmp/tmp.E36sdflTH0 + cat /tmp/tmp.SULAMV45Ey + rm /tmp/tmp.E36sdflTH0 /tmp/tmp.SULAMV45Ey + return 0 + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + kubectl get perconaservermongodbs.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbs.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbs" + kubectl patch perconaservermongodbs.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbs" + : + kubectl_bin wait --for=delete crd perconaservermongodbs.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.8jmwndImMq ++ mktemp + local LAST_ERR=/tmp/tmp.jCddluraFL + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbs.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.8jmwndImMq + cat /tmp/tmp.jCddluraFL + rm /tmp/tmp.8jmwndImMq /tmp/tmp.jCddluraFL + return 0 + local rbac_yaml=rbac.yaml + '[' -n psmdb-operator ']' + rbac_yaml=cw-rbac.yaml + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/deploy/cw-rbac.yaml --ignore-not-found ++ mktemp + local LAST_OUT=/tmp/tmp.9SpXe3PBiH ++ mktemp + local LAST_ERR=/tmp/tmp.GydYJitbG4 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/deploy/cw-rbac.yaml --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.9SpXe3PBiH clusterrole.rbac.authorization.k8s.io "percona-server-mongodb-operator" deleted clusterrolebinding.rbac.authorization.k8s.io "service-account-percona-server-mongodb-operator" deleted + cat /tmp/tmp.GydYJitbG4 + rm /tmp/tmp.9SpXe3PBiH /tmp/tmp.GydYJitbG4 + return 0 + destroy_cert_manager + kubectl_bin delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.dadMAfPRd0 ++ mktemp + local LAST_ERR=/tmp/tmp.JboaAD5EuL + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.dadMAfPRd0 + cat /tmp/tmp.JboaAD5EuL Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": namespaces "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "challenges.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "orders.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificaterequests.cert-manager.io" not found Error from server (NotFound): error when deleting 
"https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificates.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "clusterissuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "issuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": serviceaccounts "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": serviceaccounts "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": serviceaccounts "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cluster-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-edit" not found Error from server (NotFound): error when deleting 
"https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting 
"https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": services "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": services "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": services "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": deployments.apps "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": deployments.apps "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": deployments.apps "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": mutatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": validatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found + sleep 0 + for i in $(seq 0 2) + set +e + kubectl delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.dadMAfPRd0 + cat /tmp/tmp.JboaAD5EuL Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": namespaces "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "challenges.acme.cert-manager.io" not found Error from 
server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "orders.acme.cert-manager.io" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificaterequests.cert-manager.io" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificates.cert-manager.io" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "clusterissuers.cert-manager.io" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "issuers.cert-manager.io" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": serviceaccounts "cert-manager-cainjector" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": serviceaccounts "cert-manager" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": serviceaccounts "cert-manager-webhook" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cainjector" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-orders" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cluster-view" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-view" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-edit" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-cainjector" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-orders" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager:leaderelection" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager:leaderelection" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": services "cert-manager-cainjector" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": services "cert-manager" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": services "cert-manager-webhook" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": deployments.apps "cert-manager-cainjector" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": deployments.apps "cert-manager" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": deployments.apps "cert-manager-webhook" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": mutatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": validatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found
+ sleep 4
+ for i in $(seq 0 2)
+ set +e
+ kubectl delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml
+ exit_status=1
+ set -e
+ '[' 1 '!=' 0 -a -n 1 ']'
+ cat /tmp/tmp.dadMAfPRd0
+ cat /tmp/tmp.JboaAD5EuL
[same cert-manager NotFound errors as above, repeated verbatim]
+ sleep 8
+ cat /tmp/tmp.dadMAfPRd0
+ cat /tmp/tmp.JboaAD5EuL
[same cert-manager NotFound errors as above, repeated verbatim]
"cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": mutatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": validatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found + rm /tmp/tmp.dadMAfPRd0 /tmp/tmp.JboaAD5EuL + return 1 + true + '[' -n '' ']' + '[' -n psmdb-operator ']' + kubectl_bin delete --grace-period=0 --force=true namespace pitr-6260 + rm -rf /tmp/tmp.gmxU6WaGGk + kubectl_bin delete --grace-period=0 --force=true namespace psmdb-operator ++ mktemp ++ mktemp + local LAST_OUT=/tmp/tmp.jtXfg17v0V + local LAST_OUT=/tmp/tmp.LXktg5DE4g + desc 'test passed' + set +o xtrace ----------------------------------------------------------------------------------- test passed ----------------------------------------------------------------------------------- ++ mktemp ++ mktemp + local LAST_ERR=/tmp/tmp.C5OM70fnta + local exit_status=0 + local timeout=4 + local LAST_ERR=/tmp/tmp.PrEwzKUhxs + local exit_status=0 + local timeout=4 ++ seq 0 2 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete --grace-period=0 --force=true namespace pitr-6260 + for i in $(seq 0 2) + set +e + kubectl delete --grace-period=0 --force=true namespace psmdb-operator