Log: /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2266/e2e-tests/logs/pitr-physical.log Warning: version difference between client (1.35) and server (1.32) exceeds the supported minor version skew of +/-1 Warning: version difference between client (1.35) and server (1.32) exceeds the supported minor version skew of +/-1 Warning: version difference between client (1.35) and server (1.32) exceeds the supported minor version skew of +/-1 + main + create_infra pitr-physical-22757 + local ns=pitr-physical-22757 + [[ 1 == 1 ]] + delete_crd + desc 'get and delete old CRDs and RBAC' + set +o xtrace ----------------------------------------------------------------------------------- get and delete old CRDs and RBAC ----------------------------------------------------------------------------------- + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2266/deploy/crd.yaml --ignore-not-found --wait=false ++ mktemp + local LAST_OUT=/tmp/tmp.RNS15mQFzU ++ mktemp + local LAST_ERR=/tmp/tmp.ECClSpRMOz + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2266/deploy/crd.yaml --ignore-not-found --wait=false + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.RNS15mQFzU customresourcedefinition.apiextensions.k8s.io "perconaservermongodbbackups.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbrestores.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbs.psmdb.percona.com" deleted + cat /tmp/tmp.ECClSpRMOz + rm /tmp/tmp.RNS15mQFzU /tmp/tmp.ECClSpRMOz + return 0 ++ yq eval .metadata.name /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2266/deploy/crd.yaml ++ grep -v '\-\-\-' grep: warning: stray \ before - grep: warning: stray \ before - + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + kubectl get perconaservermongodbbackups.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbbackups.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbbackups" + kubectl patch perconaservermongodbbackups.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbbackups" + : + kubectl_bin wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.gBxvlbg4bY ++ mktemp + local LAST_ERR=/tmp/tmp.G5R8KyRb9l + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.gBxvlbg4bY + cat /tmp/tmp.G5R8KyRb9l + rm /tmp/tmp.gBxvlbg4bY /tmp/tmp.G5R8KyRb9l + return 0 + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + kubectl get perconaservermongodbrestores.psmdb.percona.com --all-namespaces -o wide + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbrestores.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' + grep -v NAMESPACE error: the server doesn't have a resource type "perconaservermongodbrestores" + kubectl patch perconaservermongodbrestores.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server 
doesn't have a resource type "perconaservermongodbrestores" + : + kubectl_bin wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.sRwubpsbOZ ++ mktemp + local LAST_ERR=/tmp/tmp.gFGPOCXQ6n + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.sRwubpsbOZ + cat /tmp/tmp.gFGPOCXQ6n + rm /tmp/tmp.sRwubpsbOZ /tmp/tmp.gFGPOCXQ6n + return 0 + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + kubectl get perconaservermongodbs.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbs.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbs" + kubectl patch perconaservermongodbs.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbs" + : + kubectl_bin wait --for=delete crd perconaservermongodbs.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.nbm3JKi1Ox ++ mktemp + local LAST_ERR=/tmp/tmp.UiVhU0FSZC + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbs.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.nbm3JKi1Ox + cat /tmp/tmp.UiVhU0FSZC + rm /tmp/tmp.nbm3JKi1Ox /tmp/tmp.UiVhU0FSZC + return 0 + local rbac_yaml=rbac.yaml + '[' -n psmdb-operator ']' + rbac_yaml=cw-rbac.yaml + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2266/deploy/cw-rbac.yaml --ignore-not-found ++ mktemp + local LAST_OUT=/tmp/tmp.cJSFL0i8Gu ++ mktemp + local LAST_ERR=/tmp/tmp.TKJt6lf9iN + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2266/deploy/cw-rbac.yaml --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.cJSFL0i8Gu clusterrole.rbac.authorization.k8s.io "percona-server-mongodb-operator" deleted clusterrolebinding.rbac.authorization.k8s.io "service-account-percona-server-mongodb-operator" deleted + cat /tmp/tmp.TKJt6lf9iN + rm /tmp/tmp.cJSFL0i8Gu /tmp/tmp.TKJt6lf9iN + return 0 + check_crd_for_deletion PR-2266-f9fc55604 + local git_tag=PR-2266-f9fc55604 ++ curl -s https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/PR-2266-f9fc55604/deploy/crd.yaml ++ /usr/sbin/sed s/---//g ++ yq eval .metadata.name ++ /usr/sbin/sed ':a;N;$!ba;s/\n/ /g' + for crd_name in $(curl -s https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/${git_tag}/deploy/crd.yaml | yq eval '.metadata.name' | $sed 's/---//g' | $sed ':a;N;$!ba;s/\n/ /g') ++ kubectl_bin get crd/null -o 'jsonpath={.status.conditions[-1].type}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.mQk6T6xVPr +++ mktemp ++ local LAST_ERR=/tmp/tmp.WOSMxDPoVn ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}' ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.mQk6T6xVPr ++ cat /tmp/tmp.WOSMxDPoVn Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ sleep 0 ++ for i in $(seq 0 2) ++ set +e ++ 
kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}' ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.mQk6T6xVPr ++ cat /tmp/tmp.WOSMxDPoVn Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ sleep 4 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}' ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.mQk6T6xVPr ++ cat /tmp/tmp.WOSMxDPoVn Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ sleep 8 ++ cat /tmp/tmp.mQk6T6xVPr ++ cat /tmp/tmp.WOSMxDPoVn Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ rm /tmp/tmp.mQk6T6xVPr /tmp/tmp.WOSMxDPoVn ++ return 1 + [[ '' == \T\e\r\m\i\n\a\t\i\n\g ]] + '[' -n psmdb-operator ']' + create_namespace psmdb-operator + local namespace=psmdb-operator + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + destroy_chaos_mesh ++ helm list --all-namespaces --filter chaos-mesh ++ tail -n1 ++ awk '-F ' '{print $2}' ++ sed s/NAMESPACE// + local chaos_mesh_ns= + desc 'destroy chaos-mesh' + set +o xtrace ----------------------------------------------------------------------------------- destroy chaos-mesh ----------------------------------------------------------------------------------- + '[' -n '' ']' ++ kubectl get MutatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete MutatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ grep chaos-mesh ++ kubectl get ValidatingWebhookConfiguration ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep validate-auth ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl api-resources ++ grep chaos-mesh ++ awk '{print $1}' ++ grep chaos-mesh.org ++ awk '{print $1}' ++ kubectl get crd + timeout 30 kubectl delete crd error: resource(s) were provided, but no name was specified + : ++ grep chaos-mesh ++ kubectl get clusterrolebinding ++ awk '{print $1}' + timeout 30 kubectl delete clusterrolebinding error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrole ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrole error: resource(s) were provided, but no name was specified + : + desc 'cleaned up all old namespaces' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up all old namespaces ----------------------------------------------------------------------------------- + kubectl_bin get ns ++ mktemp + '[' -n '' ']' + desc 'cleaned up old namespaces psmdb-operator' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up old namespaces psmdb-operator ----------------------------------------------------------------------------------- + kubectl_bin delete namespace psmdb-operator --ignore-not-found + grep -E -v '^kube-|^default|Terminating|psmdb-operator|openshift|^gke-|^gmp-|^NAME' + awk '{print$1}' + xargs kubectl delete ns + local LAST_OUT=/tmp/tmp.19EBccsUcH ++ mktemp ++ mktemp + local LAST_ERR=/tmp/tmp.bJqXuNQqgc + local exit_status=0 + local timeout=4 ++ seq 0 2 + local LAST_OUT=/tmp/tmp.vrs9MPJrMh ++ mktemp + 
local LAST_ERR=/tmp/tmp.rIjtUPFm28 + local exit_status=0 + local timeout=4 + for i in $(seq 0 2) + set +e + kubectl get ns ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete namespace psmdb-operator --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.19EBccsUcH + cat /tmp/tmp.bJqXuNQqgc + rm /tmp/tmp.19EBccsUcH /tmp/tmp.bJqXuNQqgc + return 0 namespace "pitr-physical-29898" deleted + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.vrs9MPJrMh namespace "psmdb-operator" deleted + cat /tmp/tmp.rIjtUPFm28 + rm /tmp/tmp.vrs9MPJrMh /tmp/tmp.rIjtUPFm28 + return 0 + kubectl_bin wait --for=delete namespace psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.dF59JNlRAy ++ mktemp + local LAST_ERR=/tmp/tmp.JIW0lf4D3u + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete namespace psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.dF59JNlRAy + cat /tmp/tmp.JIW0lf4D3u + rm /tmp/tmp.dF59JNlRAy /tmp/tmp.JIW0lf4D3u + return 0 + desc 'create namespace psmdb-operator' + set +o xtrace ----------------------------------------------------------------------------------- create namespace psmdb-operator ----------------------------------------------------------------------------------- + kubectl_bin create namespace psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.SeT3qAvcxA ++ mktemp + local LAST_ERR=/tmp/tmp.1QUHeTmPy1 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl create namespace psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.SeT3qAvcxA namespace/psmdb-operator created + cat /tmp/tmp.1QUHeTmPy1 + rm /tmp/tmp.SeT3qAvcxA /tmp/tmp.1QUHeTmPy1 + return 0 + set_kube_ctx psmdb-operator + local namespace=psmdb-operator ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.HC8oSZhCXx +++ mktemp ++ local LAST_ERR=/tmp/tmp.vz0kE9EejI ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.HC8oSZhCXx ++ cat /tmp/tmp.vz0kE9EejI ++ rm /tmp/tmp.HC8oSZhCXx /tmp/tmp.vz0kE9EejI ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-2266-f9fc55604-9-cluster7 --namespace=psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.WRdmq697o4 ++ mktemp + local LAST_ERR=/tmp/tmp.TGTUhdftbR + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-2266-f9fc55604-9-cluster7 --namespace=psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.WRdmq697o4 Context "gke_cloud-dev-112233_us-central1-a_jen-psmdb-2266-f9fc55604-9-cluster7" modified. 
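[editor sketch] Nearly every kubectl call in this log runs through the test suite's kubectl_bin retry wrapper: stdout and stderr are captured into mktemp files, the command is attempted up to three times with growing sleeps (the 0s/4s/8s pattern is visible in the crd/null loop above), and the captured output is replayed and cleaned up afterwards. A condensed sketch of that wrapper, reconstructed from the trace alone — the real helper lives in the e2e-tests scripts, and the output redirection and return handling here are inferred, not confirmed:

    kubectl_bin() {
        local LAST_OUT LAST_ERR exit_status=0 timeout=4
        LAST_OUT=$(mktemp)
        LAST_ERR=$(mktemp)
        for i in $(seq 0 2); do
            set +e
            kubectl "$@" >"$LAST_OUT" 2>"$LAST_ERR"   # redirection inferred from the cat calls below
            exit_status=$?
            set -e
            if [[ $exit_status -eq 0 ]]; then
                break
            fi
            sleep $((timeout * i))   # 0s, then 4s, then 8s, matching the trace
        done
        cat "$LAST_OUT"
        cat "$LAST_ERR" >&2
        rm "$LAST_OUT" "$LAST_ERR"
        return $exit_status
    }

This explains why every operation in the log is bracketed by the same mktemp/LAST_OUT/LAST_ERR/seq boilerplate: it is one wrapper invocation per kubectl command.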
+ cat /tmp/tmp.TGTUhdftbR + rm /tmp/tmp.WRdmq697o4 /tmp/tmp.TGTUhdftbR + return 0 + deploy_operator + desc 'start PSMDB operator: docker.io/perconalab/percona-server-mongodb-operator:PR-2266-f9fc55604' + set +o xtrace ----------------------------------------------------------------------------------- start PSMDB operator: docker.io/perconalab/percona-server-mongodb-operator:PR-2266-f9fc55604 ----------------------------------------------------------------------------------- + local cr_file + '[' -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2266/e2e-tests/pitr-physical/conf/crd.yaml ']' + cr_file=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2266/deploy/crd.yaml + kubectl_bin apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2266/deploy/crd.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.x4ztv8ArE6 ++ mktemp + local LAST_ERR=/tmp/tmp.yS29ndWJUY + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2266/deploy/crd.yaml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.x4ztv8ArE6 customresourcedefinition.apiextensions.k8s.io/perconaservermongodbbackups.psmdb.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaservermongodbrestores.psmdb.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaservermongodbs.psmdb.percona.com serverside-applied + cat /tmp/tmp.yS29ndWJUY + rm /tmp/tmp.x4ztv8ArE6 /tmp/tmp.yS29ndWJUY + return 0 + '[' -n psmdb-operator ']' + apply_rbac cw-rbac + local operator_namespace=psmdb-operator + local rbac=cw-rbac + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2266/deploy/cw-rbac.yaml + kubectl_bin apply -n psmdb-operator -f - + sed -e 's^namespace: .*^namespace: psmdb-operator^' ++ mktemp + local LAST_OUT=/tmp/tmp.QtwQl0jc87 ++ mktemp + local LAST_ERR=/tmp/tmp.AvoSFxkVU1 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -n psmdb-operator -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.QtwQl0jc87 clusterrole.rbac.authorization.k8s.io/percona-server-mongodb-operator created serviceaccount/percona-server-mongodb-operator created clusterrolebinding.rbac.authorization.k8s.io/service-account-percona-server-mongodb-operator created + cat /tmp/tmp.AvoSFxkVU1 + rm /tmp/tmp.QtwQl0jc87 /tmp/tmp.AvoSFxkVU1 + return 0 + yq eval ' (.spec.template.spec.containers[].image = "docker.io/perconalab/percona-server-mongodb-operator:PR-2266-f9fc55604") | ((.. | select(.[] == "DISABLE_TELEMETRY")) |= .value="true") | ((.. 
| select(.[] == "LOG_LEVEL")) |= .value="DEBUG")' /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2266/deploy/cw-operator.yaml + kubectl_bin apply -n psmdb-operator -f - ++ mktemp + local LAST_OUT=/tmp/tmp.JKLzwguNAG ++ mktemp + local LAST_ERR=/tmp/tmp.CyfTFo7pqe + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -n psmdb-operator -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.JKLzwguNAG deployment.apps/percona-server-mongodb-operator created + cat /tmp/tmp.CyfTFo7pqe + rm /tmp/tmp.JKLzwguNAG /tmp/tmp.CyfTFo7pqe + return 0 + sleep 20 ++ get_operator_pod ++ kubectl_bin get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.6awQasejp0 +++ mktemp ++ local LAST_ERR=/tmp/tmp.SeTqWoGVdg ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.6awQasejp0 ++ cat /tmp/tmp.SeTqWoGVdg ++ rm /tmp/tmp.6awQasejp0 /tmp/tmp.SeTqWoGVdg ++ return 0 + wait_operator_pod percona-server-mongodb-operator-5554c4df68-4fmqm + local pod=percona-server-mongodb-operator-5554c4df68-4fmqm + set +o xtrace waiting for pod/percona-server-mongodb-operator-5554c4df68-4fmqm to be ready.OK + echo 'Print operator info from log' Print operator info from log + grep 'Manager starting up' ++ get_operator_pod ++ kubectl_bin get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.uGvCnAOYQI +++ mktemp ++ local LAST_ERR=/tmp/tmp.IB2dM2R8FE ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.uGvCnAOYQI ++ cat /tmp/tmp.IB2dM2R8FE ++ rm /tmp/tmp.uGvCnAOYQI /tmp/tmp.IB2dM2R8FE ++ return 0 + kubectl_bin logs -n psmdb-operator percona-server-mongodb-operator-5554c4df68-4fmqm ++ mktemp + local LAST_OUT=/tmp/tmp.cFNrigOi2I ++ mktemp + local LAST_ERR=/tmp/tmp.T6YNx95OP5 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl logs -n psmdb-operator percona-server-mongodb-operator-5554c4df68-4fmqm + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.cFNrigOi2I + cat /tmp/tmp.T6YNx95OP5 + rm /tmp/tmp.cFNrigOi2I /tmp/tmp.T6YNx95OP5 + return 0 2026-03-10T07:28:35.968Z INFO setup Manager starting up {"gitCommit": "f9fc5560468cd2df79998ec06ba830d1104a31a3", "gitBranch": "PR-2266-f9fc55604", "buildTime": "", "goVersion": "go1.25.8", "os": "linux", "arch": "amd64"} + create_namespace pitr-physical-22757 + local namespace=pitr-physical-22757 + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + destroy_chaos_mesh ++ helm list --all-namespaces --filter chaos-mesh ++ tail -n1 ++ awk '-F ' '{print $2}' ++ sed s/NAMESPACE// + local chaos_mesh_ns= + desc 'destroy chaos-mesh' + set +o xtrace ----------------------------------------------------------------------------------- destroy chaos-mesh ----------------------------------------------------------------------------------- + '[' -n '' ']' ++ kubectl get MutatingWebhookConfiguration ++ grep chaos-mesh ++ awk 
'{print $1}' + timeout 30 kubectl delete MutatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep validate-auth ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl api-resources ++ grep chaos-mesh ++ awk '{print $1}' ++ kubectl get crd ++ grep chaos-mesh.org ++ awk '{print $1}' + timeout 30 kubectl delete crd error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrolebinding ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrolebinding error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrole ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrole error: resource(s) were provided, but no name was specified + : + desc 'cleaned up all old namespaces' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up all old namespaces ----------------------------------------------------------------------------------- + grep -E -v '^kube-|^default|Terminating|psmdb-operator|openshift|^gke-|^gmp-|^NAME' + awk '{print$1}' + kubectl_bin get ns ++ mktemp + '[' -n '' ']' + desc 'cleaned up old namespaces pitr-physical-22757' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up old namespaces pitr-physical-22757 ----------------------------------------------------------------------------------- + kubectl_bin delete namespace pitr-physical-22757 --ignore-not-found + xargs kubectl delete ns ++ mktemp + local LAST_OUT=/tmp/tmp.YCAHWlelJt ++ mktemp + local LAST_OUT=/tmp/tmp.xfGDfJaVvM ++ mktemp + local LAST_ERR=/tmp/tmp.QVxgDNe1gL + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete namespace pitr-physical-22757 --ignore-not-found + local LAST_ERR=/tmp/tmp.p5HGx4f3L3 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get ns + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.YCAHWlelJt + cat /tmp/tmp.p5HGx4f3L3 + rm /tmp/tmp.YCAHWlelJt /tmp/tmp.p5HGx4f3L3 + return 0 error: resource(s) were provided, but no name was specified + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.xfGDfJaVvM + cat /tmp/tmp.QVxgDNe1gL + rm /tmp/tmp.xfGDfJaVvM /tmp/tmp.QVxgDNe1gL + return 0 + kubectl_bin wait --for=delete namespace pitr-physical-22757 ++ mktemp + local LAST_OUT=/tmp/tmp.FlStn66wQE ++ mktemp + local LAST_ERR=/tmp/tmp.lXgFyazemg + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete namespace pitr-physical-22757 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.FlStn66wQE + cat /tmp/tmp.lXgFyazemg + rm /tmp/tmp.FlStn66wQE /tmp/tmp.lXgFyazemg + return 0 + desc 'create namespace pitr-physical-22757' + set +o xtrace ----------------------------------------------------------------------------------- create namespace pitr-physical-22757 ----------------------------------------------------------------------------------- + kubectl_bin create namespace pitr-physical-22757 ++ mktemp + local 
LAST_OUT=/tmp/tmp.RLLo98yC4c ++ mktemp + local LAST_ERR=/tmp/tmp.WJ7gDWRVd3 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl create namespace pitr-physical-22757 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.RLLo98yC4c namespace/pitr-physical-22757 created + cat /tmp/tmp.WJ7gDWRVd3 + rm /tmp/tmp.RLLo98yC4c /tmp/tmp.WJ7gDWRVd3 + return 0 + set_kube_ctx pitr-physical-22757 + local namespace=pitr-physical-22757 ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.2eov1IjVA7 +++ mktemp ++ local LAST_ERR=/tmp/tmp.V7B5KttHxK ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.2eov1IjVA7 ++ cat /tmp/tmp.V7B5KttHxK ++ rm /tmp/tmp.2eov1IjVA7 /tmp/tmp.V7B5KttHxK ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-2266-f9fc55604-9-cluster7 --namespace=pitr-physical-22757 ++ mktemp + local LAST_OUT=/tmp/tmp.Bw4nDVHO7T ++ mktemp + local LAST_ERR=/tmp/tmp.CObGg7t5Kx + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-2266-f9fc55604-9-cluster7 --namespace=pitr-physical-22757 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Bw4nDVHO7T Context "gke_cloud-dev-112233_us-central1-a_jen-psmdb-2266-f9fc55604-9-cluster7" modified. + cat /tmp/tmp.CObGg7t5Kx + rm /tmp/tmp.Bw4nDVHO7T /tmp/tmp.CObGg7t5Kx + return 0 + deploy_minio + local cert_secret= + local service_name=minio-service + desc 'install MinIO: minio-service' + set +o xtrace ----------------------------------------------------------------------------------- install MinIO: minio-service ----------------------------------------------------------------------------------- + helm uninstall minio-service + : + helm repo remove minio "minio" has been removed from your repositories + helm repo add minio https://charts.min.io/ "minio" has been added to your repositories + local endpoint=http://minio-service:9000 + minio_args=('--version' '5.4.0' '--set' 'replicas=1' '--set' 'mode=standalone' '--set' 'resources.requests.memory=256Mi' '--set' 'rootUser=rootuser' '--set' 'rootPassword=rootpass123' '--set' 'users[0].accessKey=some-access-key' '--set' 'users[0].secretKey=some-secret-key' '--set' 'users[0].policy=consoleAdmin' '--set' 'service.type=ClusterIP' '--set' 'configPathmc=/tmp/' '--set' 'securityContext.enabled=false' '--set' 'persistence.size=2G' '--set' 'fullnameOverride=minio-service' '--set' 'serviceAccount.create=true' '--set' 'serviceAccount.name=minio-service-sa') + local minio_args + [[ -n '' ]] + retry 10 60 helm install minio-service --version 5.4.0 --set replicas=1 --set mode=standalone --set resources.requests.memory=256Mi --set rootUser=rootuser --set rootPassword=rootpass123 --set 'users[0].accessKey=some-access-key' --set 'users[0].secretKey=some-secret-key' --set 'users[0].policy=consoleAdmin' --set service.type=ClusterIP --set configPathmc=/tmp/ --set securityContext.enabled=false --set persistence.size=2G --set fullnameOverride=minio-service --set serviceAccount.create=true --set serviceAccount.name=minio-service-sa minio/minio + local max=10 + local delay=60 + shift 2 + local n=1 + helm install minio-service --version 5.4.0 --set replicas=1 --set mode=standalone --set resources.requests.memory=256Mi --set rootUser=rootuser --set 
rootPassword=rootpass123 --set 'users[0].accessKey=some-access-key' --set 'users[0].secretKey=some-secret-key' --set 'users[0].policy=consoleAdmin' --set service.type=ClusterIP --set configPathmc=/tmp/ --set securityContext.enabled=false --set persistence.size=2G --set fullnameOverride=minio-service --set serviceAccount.create=true --set serviceAccount.name=minio-service-sa minio/minio NAME: minio-service LAST DEPLOYED: Tue Mar 10 07:29:15 2026 NAMESPACE: pitr-physical-22757 STATUS: deployed REVISION: 1 TEST SUITE: None NOTES: MinIO can be accessed via port 9000 on the following DNS name from within your cluster: minio-service.pitr-physical-22757.cluster.local To access MinIO from localhost, run the below commands: 1. export POD_NAME=$(kubectl get pods --namespace pitr-physical-22757 -l "release=minio-service" -o jsonpath="{.items[0].metadata.name}") 2. kubectl port-forward $POD_NAME 9000 --namespace pitr-physical-22757 Read more about port forwarding here: http://kubernetes.io/docs/user-guide/kubectl/kubectl_port-forward/ You can now access MinIO server on http://localhost:9000. Follow the below steps to connect to MinIO server with mc client: 1. Download the MinIO mc client - https://min.io/docs/minio/linux/reference/minio-mc.html#quickstart 2. export MC_HOST_minio-service-local=http://$(kubectl get secret --namespace pitr-physical-22757 minio-service -o jsonpath="{.data.rootUser}" | base64 --decode):$(kubectl get secret --namespace pitr-physical-22757 minio-service -o jsonpath="{.data.rootPassword}" | base64 --decode)@localhost:9000 3. mc ls minio-service-local ++ kubectl_bin get pods --selector=release=minio-service -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.0rs6mjReRq +++ mktemp ++ local LAST_ERR=/tmp/tmp.4Rp0ZjB1Vt ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=release=minio-service -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.0rs6mjReRq ++ cat /tmp/tmp.4Rp0ZjB1Vt ++ rm /tmp/tmp.0rs6mjReRq /tmp/tmp.4Rp0ZjB1Vt ++ return 0 + local MINIO_POD=minio-service-6d5f646cdc-mtwqt + wait_pod minio-service-6d5f646cdc-mtwqt + local pod=minio-service-6d5f646cdc-mtwqt + set +o xtrace waiting for pod/minio-service-6d5f646cdc-mtwqt to be ready.OK + '[' -n psmdb-operator ']' + kubectl_bin create svc -n psmdb-operator externalname minio-service --external-name=minio-service.pitr-physical-22757.svc.cluster.local --tcp=9000 service/minio-service created + create_minio_bucket operator-testing http://minio-service:9000 + local bucket=operator-testing + local endpoint=http://minio-service:9000 + kubectl_bin run -i --rm aws-cli --image=docker.io/perconalab/awscli --restart=Never -- bash -c 'AWS_ACCESS_KEY_ID=some-access-key AWS_SECRET_ACCESS_KEY=some-secret-key AWS_DEFAULT_REGION=us-east-1 /usr/bin/aws --no-verify-ssl --endpoint-url http://minio-service:9000 s3 mb s3://operator-testing' ++ mktemp + local LAST_OUT=/tmp/tmp.QnmRWfHK96 ++ mktemp + local LAST_ERR=/tmp/tmp.BgCbugBMq4 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl run -i --rm aws-cli --image=docker.io/perconalab/awscli --restart=Never -- bash -c 'AWS_ACCESS_KEY_ID=some-access-key AWS_SECRET_ACCESS_KEY=some-secret-key AWS_DEFAULT_REGION=us-east-1 /usr/bin/aws --no-verify-ssl --endpoint-url http://minio-service:9000 s3 mb s3://operator-testing' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.QnmRWfHK96 
make_bucket: operator-testing pod "aws-cli" deleted from pitr-physical-22757 namespace + cat /tmp/tmp.BgCbugBMq4 All commands and output from this session will be recorded in container logs, including credentials and sensitive information passed through the command prompt. If you don't see a command prompt, try pressing enter. warning: couldn't attach to pod/aws-cli, falling back to streaming logs: Internal error occurred: unable to upgrade connection: container aws-cli not found in pod aws-cli_pitr-physical-22757 + rm /tmp/tmp.QnmRWfHK96 /tmp/tmp.BgCbugBMq4 + return 0 + desc 'create secrets and start client' + set +o xtrace ----------------------------------------------------------------------------------- create secrets and start client ----------------------------------------------------------------------------------- + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2266/e2e-tests/conf/secrets.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2266/e2e-tests/conf/client.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2266/e2e-tests/conf/minio-secret.yml ++ mktemp + local LAST_OUT=/tmp/tmp.uTv35z4Ew1 ++ mktemp + local LAST_ERR=/tmp/tmp.oXsjpl21Q7 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2266/e2e-tests/conf/secrets.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2266/e2e-tests/conf/client.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2266/e2e-tests/conf/minio-secret.yml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.uTv35z4Ew1 secret/some-users created deployment.apps/psmdb-client created secret/minio-secret created + cat /tmp/tmp.oXsjpl21Q7 + rm /tmp/tmp.uTv35z4Ew1 /tmp/tmp.oXsjpl21Q7 + return 0 + cluster=some-name + desc 'create first PSMDB cluster some-name' + set +o xtrace ----------------------------------------------------------------------------------- create first PSMDB cluster some-name ----------------------------------------------------------------------------------- + apply_cluster /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2266/e2e-tests/pitr-physical/conf/some-name-rs0.yml + '[' -z '' ']' + cat_config /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2266/e2e-tests/pitr-physical/conf/some-name-rs0.yml + kubectl_bin apply -f - + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2266/e2e-tests/pitr-physical/conf/some-name-rs0.yml ++ mktemp + yq eval '(.spec | select(.image == null)).image = "docker.io/perconalab/percona-server-mongodb-operator:main-mongod8.0"' + yq eval '(.spec | select(has("pmm"))).pmm.image = "docker.io/percona/pmm-client:2.44.1-1"' + yq eval '(.spec | select(has("initImage"))).initImage = "docker.io/perconalab/percona-server-mongodb-operator:PR-2266-f9fc55604"' + /usr/sbin/sed -e s/NAME_SPACE/pitr-physical-22757/g + local LAST_OUT=/tmp/tmp.fVo9uEKP2B + yq eval '(.spec | select(has("backup"))).backup.image = "docker.io/perconalab/percona-server-mongodb-operator:main-backup"' + yq eval '.spec.upgradeOptions.apply="Never"' ++ mktemp + local LAST_ERR=/tmp/tmp.FPZX8J0fGY + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.fVo9uEKP2B perconaservermongodb.psmdb.percona.com/some-name created + cat /tmp/tmp.FPZX8J0fGY + rm /tmp/tmp.fVo9uEKP2B /tmp/tmp.FPZX8J0fGY + return 0 + desc 'check if all 3 Pods started' + set +o xtrace 
----------------------------------------------------------------------------------- check if all 3 Pods started ----------------------------------------------------------------------------------- + wait_for_running some-name-rs0 3 + local name=some-name-rs0 + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=some-name ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod some-name-rs0-0 + local pod=some-name-rs0-0 + set +o xtrace waiting for pod/some-name-rs0-0 to be ready.....OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod some-name-rs0-1 + local pod=some-name-rs0-1 + set +o xtrace waiting for pod/some-name-rs0-1 to be ready......OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.0AUpMqMTms +++ mktemp ++ local LAST_ERR=/tmp/tmp.3DytuHcv0Y ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.0AUpMqMTms ++ cat /tmp/tmp.3DytuHcv0Y ++ rm /tmp/tmp.0AUpMqMTms /tmp/tmp.3DytuHcv0Y ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-rs0-2 + local pod=some-name-rs0-2 + set +o xtrace waiting for pod/some-name-rs0-2 to be ready......OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.0NBgHR1BaD +++ mktemp ++ local LAST_ERR=/tmp/tmp.g1HQHMJmKm ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.0NBgHR1BaD ++ cat /tmp/tmp.g1HQHMJmKm ++ rm /tmp/tmp.0NBgHR1BaD /tmp/tmp.g1HQHMJmKm ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.TBKHucTSSo +++ mktemp ++ local LAST_ERR=/tmp/tmp.6rJTatyrfG ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.TBKHucTSSo ++ cat /tmp/tmp.6rJTatyrfG ++ rm /tmp/tmp.TBKHucTSSo /tmp/tmp.6rJTatyrfG ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness........................................................ 
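[editor sketch] wait_for_running, seen above for rs0 and next for cfg, walks pods 0..2 of a replica set, blocks on each with wait_pod, and consults the psmdb custom resource to decide what the last member is and whether nonvoting/hidden members also need waiting. A rough sketch of the loop as it appears in the trace — wait_pod itself is not shown in full in this log, the arbiter pod naming is an assumption, and the jsonpath filters are copied from the log:

    wait_for_running() {
        local name=$1                   # e.g. some-name-rs0
        local cluster_name=${name%-*}   # assumption: replset suffix follows the cluster name
        local rs_name=${name##*-}
        local last_pod=2
        for i in $(seq 0 $last_pod); do
            if [[ $i -eq $last_pod ]]; then
                # the last slot is an arbiter when the replset has one enabled
                arbiter=$(kubectl get psmdb "$cluster_name" \
                    -o "jsonpath={.spec.replsets[?(@.name==\"$rs_name\")].arbiter.enabled}")
                if [[ $arbiter == "true" ]]; then
                    wait_pod "$name-arbiter-0"   # assumed naming; not exercised in this run
                else
                    wait_pod "$name-$i"
                fi
            else
                wait_pod "$name-$i"
            fi
        done
        # nonvoting and hidden members are checked the same way before the final readiness wait
    }

In this run all three checks return empty strings, so only the three ordinary data pods are waited on before the cluster-readiness polling that prints the dots above.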
+ wait_for_running some-name-cfg 3 false + local name=some-name-cfg + let last_pod=2 + local check_cluster_readyness=false + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=cfg + local cluster_name=some-name ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod some-name-cfg-0 + local pod=some-name-cfg-0 + set +o xtrace waiting for pod/some-name-cfg-0 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod some-name-cfg-1 + local pod=some-name-cfg-1 + set +o xtrace waiting for pod/some-name-cfg-1 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.BiYAwCymxx +++ mktemp ++ local LAST_ERR=/tmp/tmp.9BW42KsiPI ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.BiYAwCymxx ++ cat /tmp/tmp.9BW42KsiPI ++ rm /tmp/tmp.BiYAwCymxx /tmp/tmp.9BW42KsiPI ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-cfg-2 + local pod=some-name-cfg-2 + set +o xtrace waiting for pod/some-name-cfg-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.QtyIOITyDy +++ mktemp ++ local LAST_ERR=/tmp/tmp.aFkRRiQg9x ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.QtyIOITyDy ++ cat /tmp/tmp.aFkRRiQg9x ++ rm /tmp/tmp.QtyIOITyDy /tmp/tmp.aFkRRiQg9x ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.UXnWiqOK5K +++ mktemp ++ local LAST_ERR=/tmp/tmp.NzHD6iA4UD ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.UXnWiqOK5K ++ cat /tmp/tmp.NzHD6iA4UD ++ rm /tmp/tmp.UXnWiqOK5K /tmp/tmp.NzHD6iA4UD ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ false == \t\r\u\e ]] + sleep 10 + desc 'write data' + set +o xtrace ----------------------------------------------------------------------------------- write data ----------------------------------------------------------------------------------- + write_initial_data + desc 'create user myApp' + set +o xtrace ----------------------------------------------------------------------------------- create user myApp ----------------------------------------------------------------------------------- + run_mongos 'db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})' userAdmin:userAdmin123456@some-name-mongos.pitr-physical-22757 + local 'command=db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})' + local uri=userAdmin:userAdmin123456@some-name-mongos.pitr-physical-22757 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local port=27017 + local mongo_bin=mongo ++ echo .svc.cluster.local ++ awk -F: '{print $2}' + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 ++ 
kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.vYrTy2oBlg +++ mktemp ++ local LAST_ERR=/tmp/tmp.UBrFdKHDRk ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.vYrTy2oBlg ++ cat /tmp/tmp.UBrFdKHDRk ++ rm /tmp/tmp.vYrTy2oBlg /tmp/tmp.UBrFdKHDRk ++ return 0 + local client_container=psmdb-client-bb8b97679-jsw4c + kubectl_bin exec psmdb-client-bb8b97679-jsw4c -- bash -c 'printf '\''db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})\n'\'' | mongo mongodb://userAdmin:userAdmin123456@some-name-mongos.pitr-physical-22757.svc.cluster.local:27017/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.zjINnEHUxL ++ mktemp + local LAST_ERR=/tmp/tmp.g9M2eC5R8t + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-bb8b97679-jsw4c -- bash -c 'printf '\''db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})\n'\'' | mongo mongodb://userAdmin:userAdmin123456@some-name-mongos.pitr-physical-22757.svc.cluster.local:27017/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.zjINnEHUxL Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-mongos.pitr-physical-22757.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb Implicit session: session { "id" : UUID("0384e060-901b-470a-91ab-bfabd2aad165") } Percona Server for MongoDB server version: v8.0.19-7 WARNING: shell and server versions do not match Successfully added user: { "user" : "myApp", "roles" : [ { "db" : "myApp", "role" : "readWrite" } ] } bye + cat /tmp/tmp.g9M2eC5R8t + rm /tmp/tmp.zjINnEHUxL /tmp/tmp.g9M2eC5R8t + return 0 + sleep 2 + write_document + local cmp_postfix= + local sleep_value=0 + log 'write initial data, read from all' + set +o xtrace [2026-03-10T07:33:49+0000] write initial data, read from all + run_mongos 'use myApp\n db.test.insert({ x: 100500 })' myApp:myPass@some-name-mongos.pitr-physical-22757 + local 'command=use myApp\n db.test.insert({ x: 100500 })' + local uri=myApp:myPass@some-name-mongos.pitr-physical-22757 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local port=27017 + local mongo_bin=mongo ++ echo .svc.cluster.local ++ awk -F: '{print $2}' + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.9OhOexbK5x +++ mktemp ++ local LAST_ERR=/tmp/tmp.UNCbG7L8Ta ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.9OhOexbK5x ++ cat /tmp/tmp.UNCbG7L8Ta ++ rm /tmp/tmp.9OhOexbK5x /tmp/tmp.UNCbG7L8Ta ++ return 0 + local client_container=psmdb-client-bb8b97679-jsw4c + kubectl_bin exec psmdb-client-bb8b97679-jsw4c -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.pitr-physical-22757.svc.cluster.local:27017/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.LUi4aRumYK ++ mktemp + local LAST_ERR=/tmp/tmp.eM4apms4OJ + local exit_status=0 + local timeout=4 
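[editor sketch] run_mongos, used above to create the myApp user and below to insert and read documents, never talks to MongoDB from the test runner itself: it looks up the psmdb-client deployment's pod and pipes the statement into the legacy mongo shell inside that pod, with a mongodb:// URI built from its arguments. A condensed sketch matching the invocations in the trace (flag handling and port overrides are trimmed; quoting of commands containing single quotes is not handled here):

    run_mongos() {
        local command=$1   # e.g. 'use myApp\n db.test.insert({ x: 100500 })'
        local uri=$2       # e.g. myApp:myPass@some-name-mongos.<namespace>
        local suffix=.svc.cluster.local:27017
        local client
        client=$(kubectl get pods --selector=name=psmdb-client \
            -o 'jsonpath={.items[].metadata.name}')
        kubectl exec "$client" -- bash -c \
            "printf '$command\n' | mongo mongodb://$uri$suffix/admin"
    }

The "shell and server versions do not match" warnings in the output are expected: the client image ships the v4.4 mongo shell while the server is v8.0.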
++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-bb8b97679-jsw4c -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.pitr-physical-22757.svc.cluster.local:27017/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.LUi4aRumYK Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-mongos.pitr-physical-22757.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb Implicit session: session { "id" : UUID("7bf8d1ad-f3b5-4fd4-b7d8-9481f7ff9a81") } Percona Server for MongoDB server version: v8.0.19-7 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.eM4apms4OJ + rm /tmp/tmp.LUi4aRumYK /tmp/tmp.eM4apms4OJ + return 0 + sleep 0 + compare_mongos_cmd find myApp:myPass@some-name-mongos.pitr-physical-22757 + local command=find + local uri=myApp:myPass@some-name-mongos.pitr-physical-22757 + local postfix= + local suffix= + local database=myApp + local collection=test + local port=27017 + local tls=false + [[ false == \t\r\u\e ]] + mongos_command=run_mongos + log 'running db.test.command() in myApp' + set +o xtrace [2026-03-10T07:33:52+0000] running db.test.command() in myApp + run_mongos 'use myApp\n db.test.find()' myApp:myPass@some-name-mongos.pitr-physical-22757 mongodb '' '' 27017 + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-mongos.pitr-physical-22757 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local port=27017 + local mongo_bin=mongo + grep -E -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ echo .svc.cluster.local ++ awk -F: '{print $2}' + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Xh1Jn3rBq3 +++ mktemp ++ local LAST_ERR=/tmp/tmp.2URo2yzHKX ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Xh1Jn3rBq3 ++ cat /tmp/tmp.2URo2yzHKX ++ rm /tmp/tmp.Xh1Jn3rBq3 /tmp/tmp.2URo2yzHKX ++ return 0 + local client_container=psmdb-client-bb8b97679-jsw4c + kubectl_bin exec psmdb-client-bb8b97679-jsw4c -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.pitr-physical-22757.svc.cluster.local:27017/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.XKeklhPp6o ++ mktemp + local LAST_ERR=/tmp/tmp.qgNeVPXffF + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-bb8b97679-jsw4c -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.pitr-physical-22757.svc.cluster.local:27017/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.XKeklhPp6o + cat /tmp/tmp.qgNeVPXffF + rm /tmp/tmp.XKeklhPp6o /tmp/tmp.qgNeVPXffF + return 0 + [[ 0 -eq 0 ]] + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2266/e2e-tests/pitr-physical/compare/find.json 
/tmp/tmp.uSIEEO8Dq3/find + desc 'wait for pbm agents to start' + set +o xtrace ----------------------------------------------------------------------------------- wait for pbm agents to start ----------------------------------------------------------------------------------- + wait_backup_agent some-name-rs0-0 + local agent_pod=some-name-rs0-0 + set +o xtrace waiting for pbm-agent to be ready in some-name-rs0-0...2026-03-10T07:32:59.000+0000 I listening for the commands + wait_backup_agent some-name-rs0-1 + local agent_pod=some-name-rs0-1 + set +o xtrace waiting for pbm-agent to be ready in some-name-rs0-1...2026-03-10T07:32:58.000+0000 I listening for the commands + wait_backup_agent some-name-rs0-2 + local agent_pod=some-name-rs0-2 + set +o xtrace waiting for pbm-agent to be ready in some-name-rs0-2...2026-03-10T07:33:10.000+0000 I listening for the commands + wait_backup_agent some-name-rs1-0 + local agent_pod=some-name-rs1-0 + set +o xtrace waiting for pbm-agent to be ready in some-name-rs1-0...2026-03-10T07:32:58.000+0000 I listening for the commands + wait_backup_agent some-name-rs1-1 + local agent_pod=some-name-rs1-1 + set +o xtrace waiting for pbm-agent to be ready in some-name-rs1-1...2026-03-10T07:33:01.000+0000 I listening for the commands + wait_backup_agent some-name-rs1-2 + local agent_pod=some-name-rs1-2 + set +o xtrace waiting for pbm-agent to be ready in some-name-rs1-2...2026-03-10T07:33:11.000+0000 I listening for the commands + wait_backup_agent some-name-rs2-0 + local agent_pod=some-name-rs2-0 + set +o xtrace waiting for pbm-agent to be ready in some-name-rs2-0...2026-03-10T07:32:57.000+0000 I listening for the commands + wait_backup_agent some-name-rs2-1 + local agent_pod=some-name-rs2-1 + set +o xtrace waiting for pbm-agent to be ready in some-name-rs2-1...2026-03-10T07:33:02.000+0000 I listening for the commands + wait_backup_agent some-name-rs2-2 + local agent_pod=some-name-rs2-2 + set +o xtrace waiting for pbm-agent to be ready in some-name-rs2-2...2026-03-10T07:33:11.000+0000 I listening for the commands + log 'Sleeping for 360 seconds to avoid PBM-1265' + set +o xtrace [2026-03-10T07:34:06+0000] Sleeping for 360 seconds to avoid PBM-1265 + sleep 360 + backup_name_minio=backup-minio + run_backup backup-minio 2 physical + local name=backup-minio + local idx=2 + local type=physical + desc 'run backup backup-minio-2' + set +o xtrace ----------------------------------------------------------------------------------- run backup backup-minio-2 ----------------------------------------------------------------------------------- + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2266/e2e-tests/pitr-physical/conf/backup-minio.yml + /usr/sbin/sed -e 's/name:/name: backup-minio-2/' + /usr/sbin/sed -e 's/type:/type: physical/' + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.USIdpi6SIg ++ mktemp + local LAST_ERR=/tmp/tmp.2B78NRBKXb + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.USIdpi6SIg perconaservermongodbbackup.psmdb.percona.com/backup-minio-2 created + cat /tmp/tmp.2B78NRBKXb + rm /tmp/tmp.USIdpi6SIg /tmp/tmp.2B78NRBKXb + return 0 + wait_backup backup-minio-2 + local backup_name=backup-minio-2 + local target_state=ready + set +o xtrace waiting for backup-minio-2 to reach ready state..........OK + sleep 5 + write_document -2nd 130 + local cmp_postfix=-2nd + local sleep_value=130 + log 'write initial data, read from all' + set +o 
xtrace [2026-03-10T07:40:31+0000] write initial data, read from all + run_mongos 'use myApp\n db.test.insert({ x: 100500 })' myApp:myPass@some-name-mongos.pitr-physical-22757 + local 'command=use myApp\n db.test.insert({ x: 100500 })' + local uri=myApp:myPass@some-name-mongos.pitr-physical-22757 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local port=27017 + local mongo_bin=mongo ++ echo .svc.cluster.local ++ awk -F: '{print $2}' + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.9BPLfkgdGc +++ mktemp ++ local LAST_ERR=/tmp/tmp.Is6NnI7w5p ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.9BPLfkgdGc ++ cat /tmp/tmp.Is6NnI7w5p ++ rm /tmp/tmp.9BPLfkgdGc /tmp/tmp.Is6NnI7w5p ++ return 0 + local client_container=psmdb-client-bb8b97679-jsw4c + kubectl_bin exec psmdb-client-bb8b97679-jsw4c -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.pitr-physical-22757.svc.cluster.local:27017/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.qvPG5wy9wX ++ mktemp + local LAST_ERR=/tmp/tmp.3woeQGqbJo + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-bb8b97679-jsw4c -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.pitr-physical-22757.svc.cluster.local:27017/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.qvPG5wy9wX Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-mongos.pitr-physical-22757.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb Implicit session: session { "id" : UUID("d213030f-ca4a-433d-b490-91b352471979") } Percona Server for MongoDB server version: v8.0.19-7 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.3woeQGqbJo + rm /tmp/tmp.qvPG5wy9wX /tmp/tmp.3woeQGqbJo + return 0 + sleep 130 + compare_mongos_cmd find myApp:myPass@some-name-mongos.pitr-physical-22757 -2nd + local command=find + local uri=myApp:myPass@some-name-mongos.pitr-physical-22757 + local postfix=-2nd + local suffix= + local database=myApp + local collection=test + local port=27017 + local tls=false + [[ false == \t\r\u\e ]] + mongos_command=run_mongos + log 'running db.test.command() in myApp' + set +o xtrace [2026-03-10T07:42:44+0000] running db.test.command() in myApp + run_mongos 'use myApp\n db.test.find()' myApp:myPass@some-name-mongos.pitr-physical-22757 mongodb '' '' 27017 + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-mongos.pitr-physical-22757 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local port=27017 + grep -E -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' + local mongo_bin=mongo + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ echo .svc.cluster.local ++ awk -F: '{print $2}' + suffix_port= + [[ -z '' ]] + 
suffix=.svc.cluster.local:27017 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.uHgdflRNWH +++ mktemp ++ local LAST_ERR=/tmp/tmp.tA0NknEjdE ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.uHgdflRNWH ++ cat /tmp/tmp.tA0NknEjdE ++ rm /tmp/tmp.uHgdflRNWH /tmp/tmp.tA0NknEjdE ++ return 0 + local client_container=psmdb-client-bb8b97679-jsw4c + kubectl_bin exec psmdb-client-bb8b97679-jsw4c -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.pitr-physical-22757.svc.cluster.local:27017/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.NKbx9lv2oy ++ mktemp + local LAST_ERR=/tmp/tmp.ODE2I6t349 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-bb8b97679-jsw4c -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.pitr-physical-22757.svc.cluster.local:27017/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.NKbx9lv2oy + cat /tmp/tmp.ODE2I6t349 + rm /tmp/tmp.NKbx9lv2oy /tmp/tmp.ODE2I6t349 + return 0 + [[ 0 -eq 0 ]] + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2266/e2e-tests/pitr-physical/compare/find-2nd.json /tmp/tmp.uSIEEO8Dq3/find-2nd ++ date +%s + last_write=1773128567 ++ get_latest_oplog_chunk_ts some-name ++ local cluster=some-name +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.7Y1M80psDv ++++ mktemp +++ local LAST_ERR=/tmp/tmp.4f9hY6Mpr2 +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.7Y1M80psDv +++ cat /tmp/tmp.4f9hY6Mpr2 +++ rm /tmp/tmp.7Y1M80psDv /tmp/tmp.4f9hY6Mpr2 +++ return 0 ++ echo 1773128446 + last_chunk=1773128446 + log 'last write: 1773128567' + set +o xtrace [2026-03-10T07:42:49+0000] last write: 1773128567 + log 'last chunk: 1773128446' + set +o xtrace [2026-03-10T07:42:49+0000] last chunk: 1773128446 + retries=0 + [[ 1773128446 -gt 1773128567 ]] + [[ 0 -gt 30 ]] ++ get_latest_oplog_chunk_ts some-name ++ local cluster=some-name +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.qDojpQnE9b ++++ mktemp +++ local LAST_ERR=/tmp/tmp.AhySMVAKsF +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.qDojpQnE9b +++ cat /tmp/tmp.AhySMVAKsF +++ rm /tmp/tmp.qDojpQnE9b /tmp/tmp.AhySMVAKsF +++ return 0 ++ echo 1773128446 + last_chunk=1773128446 + retries=1 ++ format_date 1773128446 ++ local timestamp=1773128446 +++ TZ=UTC +++ /usr/sbin/date -d@1773128446 '+%Y-%m-%d %H:%M:%S' ++ echo 2026-03-10 07:40:46 ++ format_date 1773128567 ++ local timestamp=1773128567 +++ TZ=UTC +++ /usr/sbin/date -d@1773128567 '+%Y-%m-%d %H:%M:%S' ++ echo 2026-03-10 07:42:47 + log 'Waiting for last oplog chunk 
(2026-03-10 07:40:46) to be greater than last write (2026-03-10 07:42:47)' + set +o xtrace [2026-03-10T07:42:50+0000] Waiting for last oplog chunk (2026-03-10 07:40:46) to be greater than last write (2026-03-10 07:42:47) + sleep 10 + [[ 1773128446 -gt 1773128567 ]] + [[ 1 -gt 30 ]] ++ get_latest_oplog_chunk_ts some-name ++ local cluster=some-name +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.GJ6bwWNluW ++++ mktemp +++ local LAST_ERR=/tmp/tmp.gCll3uHAMp +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.GJ6bwWNluW +++ cat /tmp/tmp.gCll3uHAMp +++ rm /tmp/tmp.GJ6bwWNluW /tmp/tmp.gCll3uHAMp +++ return 0 ++ echo 1773128446 + last_chunk=1773128446 + retries=2 ++ format_date 1773128446 ++ local timestamp=1773128446 +++ TZ=UTC +++ /usr/sbin/date -d@1773128446 '+%Y-%m-%d %H:%M:%S' ++ echo 2026-03-10 07:40:46 ++ format_date 1773128567 ++ local timestamp=1773128567 +++ TZ=UTC +++ /usr/sbin/date -d@1773128567 '+%Y-%m-%d %H:%M:%S' ++ echo 2026-03-10 07:42:47 + log 'Waiting for last oplog chunk (2026-03-10 07:40:46) to be greater than last write (2026-03-10 07:42:47)' + set +o xtrace [2026-03-10T07:43:03+0000] Waiting for last oplog chunk (2026-03-10 07:40:46) to be greater than last write (2026-03-10 07:42:47) + sleep 10 + [[ 1773128446 -gt 1773128567 ]] + [[ 2 -gt 30 ]] ++ get_latest_oplog_chunk_ts some-name ++ local cluster=some-name +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.VCy0syvS5V ++++ mktemp +++ local LAST_ERR=/tmp/tmp.JcRbhuRbVg +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.VCy0syvS5V +++ cat /tmp/tmp.JcRbhuRbVg +++ rm /tmp/tmp.VCy0syvS5V /tmp/tmp.JcRbhuRbVg +++ return 0 ++ echo 1773128572 + last_chunk=1773128572 + retries=3 ++ format_date 1773128572 ++ local timestamp=1773128572 +++ TZ=UTC +++ /usr/sbin/date -d@1773128572 '+%Y-%m-%d %H:%M:%S' ++ echo 2026-03-10 07:42:52 ++ format_date 1773128567 ++ local timestamp=1773128567 +++ TZ=UTC +++ /usr/sbin/date -d@1773128567 '+%Y-%m-%d %H:%M:%S' ++ echo 2026-03-10 07:42:47 + log 'Waiting for last oplog chunk (2026-03-10 07:42:52) to be greater than last write (2026-03-10 07:42:47)' + set +o xtrace [2026-03-10T07:43:15+0000] Waiting for last oplog chunk (2026-03-10 07:42:52) to be greater than last write (2026-03-10 07:42:47) + sleep 10 + [[ 1773128572 -gt 1773128567 ]] + sleep 10 + check_recovery backup-minio-2 date 1773128572 -2nd some-name + local backup_name=backup-minio-2 + local restore_type=date + local restore_date=1773128572 + local cmp_postfix=-2nd + local cluster_name=some-name + local backupSource= ++ get_latest_oplog_chunk_ts some-name ++ local cluster=some-name +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.V9Bsr11dgs ++++ mktemp +++ local LAST_ERR=/tmp/tmp.vadDkseI1k +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ 
kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.V9Bsr11dgs +++ cat /tmp/tmp.vadDkseI1k +++ rm /tmp/tmp.V9Bsr11dgs /tmp/tmp.vadDkseI1k +++ return 0 ++ echo 1773128572 + local latest_ts=1773128572 + desc 'write more data before restore by date' + set +o xtrace ----------------------------------------------------------------------------------- write more data before restore by date ----------------------------------------------------------------------------------- + run_mongos 'use myApp\n db.test.insert({ x: 100501 })' myApp:myPass@some-name-mongos.pitr-physical-22757 + local 'command=use myApp\n db.test.insert({ x: 100501 })' + local uri=myApp:myPass@some-name-mongos.pitr-physical-22757 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local port=27017 + local mongo_bin=mongo ++ echo .svc.cluster.local ++ awk -F: '{print $2}' + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.5JanFqlPj3 +++ mktemp ++ local LAST_ERR=/tmp/tmp.LJDVvuCgPo ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.5JanFqlPj3 ++ cat /tmp/tmp.LJDVvuCgPo ++ rm /tmp/tmp.5JanFqlPj3 /tmp/tmp.LJDVvuCgPo ++ return 0 + local client_container=psmdb-client-bb8b97679-jsw4c + kubectl_bin exec psmdb-client-bb8b97679-jsw4c -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100501 })\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.pitr-physical-22757.svc.cluster.local:27017/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.dfPekrXxTJ ++ mktemp + local LAST_ERR=/tmp/tmp.g5D62LEJtD + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-bb8b97679-jsw4c -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100501 })\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.pitr-physical-22757.svc.cluster.local:27017/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.dfPekrXxTJ Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-mongos.pitr-physical-22757.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb Implicit session: session { "id" : UUID("41aa4338-50f0-41bc-85b3-44c2f5e3467f") } Percona Server for MongoDB server version: v8.0.19-7 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.g5D62LEJtD + rm /tmp/tmp.dfPekrXxTJ /tmp/tmp.g5D62LEJtD + return 0 + [[ -n 1773128572 ]] ++ format_date 1773128572 ++ local timestamp=1773128572 +++ TZ=UTC +++ /usr/sbin/date -d@1773128572 '+%Y-%m-%d %H:%M:%S' ++ echo 2026-03-10 07:42:52 + desc 'Restoring to time 2026-03-10 07:42:52' + set +o xtrace ----------------------------------------------------------------------------------- Restoring to time 2026-03-10 07:42:52 ----------------------------------------------------------------------------------- + retries=0 + [[ 1773128572 -gt 1773128572 ]] + [[ 0 -gt 30 ]] ++ get_latest_oplog_chunk_ts some-name ++ local cluster=some-name +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp 
+++ local LAST_OUT=/tmp/tmp.AQxkemIOl9 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.K8mjkJ4SGQ +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.AQxkemIOl9 +++ cat /tmp/tmp.K8mjkJ4SGQ +++ rm /tmp/tmp.AQxkemIOl9 /tmp/tmp.K8mjkJ4SGQ +++ return 0 ++ echo 1773128572 + latest_ts=1773128572 + retries=1 ++ format_date 1773128572 ++ local timestamp=1773128572 +++ TZ=UTC +++ /usr/sbin/date -d@1773128572 '+%Y-%m-%d %H:%M:%S' ++ echo 2026-03-10 07:42:52 ++ format_date 1773128572 ++ local timestamp=1773128572 +++ TZ=UTC +++ /usr/sbin/date -d@1773128572 '+%Y-%m-%d %H:%M:%S' ++ echo 2026-03-10 07:42:52 + echo 'Waiting for last oplog chunk (2026-03-10 07:42:52) to be greater than restore target (2026-03-10 07:42:52)' Waiting for last oplog chunk (2026-03-10 07:42:52) to be greater than restore target (2026-03-10 07:42:52) + sleep 10 + [[ 1773128572 -gt 1773128572 ]] + [[ 1 -gt 30 ]] ++ get_latest_oplog_chunk_ts some-name ++ local cluster=some-name +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.EHUAthujCR ++++ mktemp +++ local LAST_ERR=/tmp/tmp.FOtbnFp9CD +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.EHUAthujCR +++ cat /tmp/tmp.FOtbnFp9CD +++ rm /tmp/tmp.EHUAthujCR /tmp/tmp.FOtbnFp9CD +++ return 0 ++ echo 1773128572 + latest_ts=1773128572 + retries=2 ++ format_date 1773128572 ++ local timestamp=1773128572 +++ TZ=UTC +++ /usr/sbin/date -d@1773128572 '+%Y-%m-%d %H:%M:%S' ++ echo 2026-03-10 07:42:52 ++ format_date 1773128572 ++ local timestamp=1773128572 +++ TZ=UTC +++ /usr/sbin/date -d@1773128572 '+%Y-%m-%d %H:%M:%S' ++ echo 2026-03-10 07:42:52 + echo 'Waiting for last oplog chunk (2026-03-10 07:42:52) to be greater than restore target (2026-03-10 07:42:52)' Waiting for last oplog chunk (2026-03-10 07:42:52) to be greater than restore target (2026-03-10 07:42:52) + sleep 10 + [[ 1773128572 -gt 1773128572 ]] + [[ 2 -gt 30 ]] ++ get_latest_oplog_chunk_ts some-name ++ local cluster=some-name +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.zibY6Fcnzm ++++ mktemp +++ local LAST_ERR=/tmp/tmp.j3aSFEr3TQ +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.zibY6Fcnzm +++ cat /tmp/tmp.j3aSFEr3TQ +++ rm /tmp/tmp.zibY6Fcnzm /tmp/tmp.j3aSFEr3TQ +++ return 0 ++ echo 1773128572 + latest_ts=1773128572 + retries=3 ++ format_date 1773128572 ++ local timestamp=1773128572 +++ TZ=UTC +++ /usr/sbin/date -d@1773128572 '+%Y-%m-%d %H:%M:%S' ++ echo 2026-03-10 07:42:52 ++ format_date 1773128572 ++ local timestamp=1773128572 +++ TZ=UTC +++ /usr/sbin/date -d@1773128572 '+%Y-%m-%d %H:%M:%S' ++ echo 2026-03-10 07:42:52 + echo 'Waiting for last oplog chunk (2026-03-10 07:42:52) to be greater than restore target (2026-03-10 07:42:52)' Waiting for last oplog chunk (2026-03-10 07:42:52) to be greater than 
restore target (2026-03-10 07:42:52) + sleep 10 + [[ 1773128572 -gt 1773128572 ]] + [[ 3 -gt 30 ]] ++ get_latest_oplog_chunk_ts some-name ++ local cluster=some-name +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.u03ipjyVmC ++++ mktemp +++ local LAST_ERR=/tmp/tmp.V5xRvrZqDZ +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.u03ipjyVmC +++ cat /tmp/tmp.V5xRvrZqDZ +++ rm /tmp/tmp.u03ipjyVmC /tmp/tmp.V5xRvrZqDZ +++ return 0 ++ echo 1773128572 + latest_ts=1773128572 + retries=4 ++ format_date 1773128572 ++ local timestamp=1773128572 +++ TZ=UTC +++ /usr/sbin/date -d@1773128572 '+%Y-%m-%d %H:%M:%S' ++ echo 2026-03-10 07:42:52 ++ format_date 1773128572 ++ local timestamp=1773128572 +++ TZ=UTC +++ /usr/sbin/date -d@1773128572 '+%Y-%m-%d %H:%M:%S' ++ echo 2026-03-10 07:42:52 + echo 'Waiting for last oplog chunk (2026-03-10 07:42:52) to be greater than restore target (2026-03-10 07:42:52)' Waiting for last oplog chunk (2026-03-10 07:42:52) to be greater than restore target (2026-03-10 07:42:52) + sleep 10 + [[ 1773128572 -gt 1773128572 ]] + [[ 4 -gt 30 ]] ++ get_latest_oplog_chunk_ts some-name ++ local cluster=some-name +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.luggjYuhFz ++++ mktemp +++ local LAST_ERR=/tmp/tmp.lEKQIKfLVp +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.luggjYuhFz +++ cat /tmp/tmp.lEKQIKfLVp +++ rm /tmp/tmp.luggjYuhFz /tmp/tmp.lEKQIKfLVp +++ return 0 ++ echo 1773128572 + latest_ts=1773128572 + retries=5 ++ format_date 1773128572 ++ local timestamp=1773128572 +++ TZ=UTC +++ /usr/sbin/date -d@1773128572 '+%Y-%m-%d %H:%M:%S' ++ echo 2026-03-10 07:42:52 ++ format_date 1773128572 ++ local timestamp=1773128572 +++ TZ=UTC +++ /usr/sbin/date -d@1773128572 '+%Y-%m-%d %H:%M:%S' ++ echo 2026-03-10 07:42:52 + echo 'Waiting for last oplog chunk (2026-03-10 07:42:52) to be greater than restore target (2026-03-10 07:42:52)' Waiting for last oplog chunk (2026-03-10 07:42:52) to be greater than restore target (2026-03-10 07:42:52) + sleep 10 + [[ 1773128572 -gt 1773128572 ]] + [[ 5 -gt 30 ]] ++ get_latest_oplog_chunk_ts some-name ++ local cluster=some-name +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.sRO63BRlpD ++++ mktemp +++ local LAST_ERR=/tmp/tmp.gOF7QJ9t62 +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.sRO63BRlpD +++ cat /tmp/tmp.gOF7QJ9t62 +++ rm /tmp/tmp.sRO63BRlpD /tmp/tmp.gOF7QJ9t62 +++ return 0 ++ echo 1773128572 + latest_ts=1773128572 + retries=6 ++ format_date 1773128572 ++ local timestamp=1773128572 +++ TZ=UTC +++ /usr/sbin/date -d@1773128572 '+%Y-%m-%d %H:%M:%S' ++ echo 2026-03-10 07:42:52 ++ format_date 1773128572 ++ 
local timestamp=1773128572 +++ TZ=UTC +++ /usr/sbin/date -d@1773128572 '+%Y-%m-%d %H:%M:%S' ++ echo 2026-03-10 07:42:52 + echo 'Waiting for last oplog chunk (2026-03-10 07:42:52) to be greater than restore target (2026-03-10 07:42:52)' Waiting for last oplog chunk (2026-03-10 07:42:52) to be greater than restore target (2026-03-10 07:42:52) + sleep 10 + [[ 1773128572 -gt 1773128572 ]] + [[ 6 -gt 30 ]] ++ get_latest_oplog_chunk_ts some-name ++ local cluster=some-name +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.syUWVQHbMg ++++ mktemp +++ local LAST_ERR=/tmp/tmp.tGa8ZaxxlB +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.syUWVQHbMg +++ cat /tmp/tmp.tGa8ZaxxlB +++ rm /tmp/tmp.syUWVQHbMg /tmp/tmp.tGa8ZaxxlB +++ return 0 ++ echo 1773128577 + latest_ts=1773128577 + retries=7 ++ format_date 1773128577 ++ local timestamp=1773128577 +++ TZ=UTC +++ /usr/sbin/date -d@1773128577 '+%Y-%m-%d %H:%M:%S' ++ echo 2026-03-10 07:42:57 ++ format_date 1773128572 ++ local timestamp=1773128572 +++ TZ=UTC +++ /usr/sbin/date -d@1773128572 '+%Y-%m-%d %H:%M:%S' ++ echo 2026-03-10 07:42:52 + echo 'Waiting for last oplog chunk (2026-03-10 07:42:57) to be greater than restore target (2026-03-10 07:42:52)' Waiting for last oplog chunk (2026-03-10 07:42:57) to be greater than restore target (2026-03-10 07:42:52) + sleep 10 + [[ 1773128577 -gt 1773128572 ]] + '[' -z '' ']' + desc 'check restore by date' + set +o xtrace ----------------------------------------------------------------------------------- check restore by date ----------------------------------------------------------------------------------- + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2266/e2e-tests/pitr-physical/conf/restore.yml + /usr/sbin/sed -e 's/name:/name: restore-backup-minio-2/' + /usr/sbin/sed -e 's/backupName:/backupName: backup-minio-2/' + /usr/sbin/sed -e /backupSource/,+8d + /usr/sbin/sed -e 's/pitrType:/type: date/' + '[' -z 1773128572 ']' + kubectl_bin apply -f - ++ format_date 1773128572 ++ local timestamp=1773128572 ++ mktemp +++ TZ=UTC +++ /usr/sbin/date -d@1773128572 '+%Y-%m-%d %H:%M:%S' + local LAST_OUT=/tmp/tmp.pmsmQ9fqHT ++ echo 2026-03-10 07:42:52 ++ mktemp + /usr/sbin/sed -e 's/date:/date: 2026-03-10 07:42:52/' + local LAST_ERR=/tmp/tmp.fAj5m4jgqp + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.pmsmQ9fqHT perconaservermongodbrestore.psmdb.percona.com/restore-backup-minio-2 created + cat /tmp/tmp.fAj5m4jgqp + rm /tmp/tmp.pmsmQ9fqHT /tmp/tmp.fAj5m4jgqp + return 0 + wait_restore backup-minio-2 some-name requested 0 1200 + local backup_name=backup-minio-2 + local cluster_name=some-name + local target_state=requested + local wait_cluster_consistency=0 + local wait_time=1200 + local ok_if_ready=0 + set +o xtrace Waiting for the psmdb-restore/restore-backup-minio-2 object to be created.OK Waiting psmdb-restore/restore-backup-minio-2 to reach state "requested" ..............OK after 13 minutes + [[ 0 -eq 1 ]] + echo + wait_restore backup-minio-2 some-name ready 0 1600 + local backup_name=backup-minio-2 + local cluster_name=some-name + local target_state=ready + local 
wait_cluster_consistency=0 + local wait_time=1600 + local ok_if_ready=0 + set +o xtrace Waiting for the psmdb-restore/restore-backup-minio-2 object to be created.OK Waiting psmdb-restore/restore-backup-minio-2 to reach state "ready" ...OK after 2 minutes + [[ 0 -eq 1 ]] + echo + set -o xtrace + wait_for_running some-name-rs0 3 + local name=some-name-rs0 + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=some-name ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod some-name-rs0-0 + local pod=some-name-rs0-0 + set +o xtrace waiting for pod/some-name-rs0-0 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod some-name-rs0-1 + local pod=some-name-rs0-1 + set +o xtrace waiting for pod/some-name-rs0-1 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ka7wHjBqyj +++ mktemp ++ local LAST_ERR=/tmp/tmp.bR4EL8uHgO ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.ka7wHjBqyj ++ cat /tmp/tmp.bR4EL8uHgO ++ rm /tmp/tmp.ka7wHjBqyj /tmp/tmp.bR4EL8uHgO ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-rs0-2 + local pod=some-name-rs0-2 + set +o xtrace waiting for pod/some-name-rs0-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.x1mPf3teUR +++ mktemp ++ local LAST_ERR=/tmp/tmp.o9CQeL5gMD ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.x1mPf3teUR ++ cat /tmp/tmp.o9CQeL5gMD ++ rm /tmp/tmp.x1mPf3teUR /tmp/tmp.o9CQeL5gMD ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.xbZmgL2aFN +++ mktemp ++ local LAST_ERR=/tmp/tmp.ZDXYapY2o9 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.xbZmgL2aFN ++ cat /tmp/tmp.ZDXYapY2o9 ++ rm /tmp/tmp.xbZmgL2aFN /tmp/tmp.ZDXYapY2o9 ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness..................................................................................... 
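-----------------------------------------------------------------------------------
For reference, the restore-by-date step traced above reduces to rendering and
applying a PerconaServerMongoDBRestore object. Below is a minimal sketch in plain
bash; the sed pipeline over restore.yml is replaced by a heredoc, and the exact
spec layout (clusterName and the pitr block) is an assumption based on the
operator's documented restore CR, since this trace only shows the sed
substitutions for name:, backupName:, pitrType:, and date::

# Convert the target chunk timestamp (epoch seconds) to the UTC wall-clock
# format used by the trace's format_date helper.
restore_ts=1773128572
restore_date=$(TZ=UTC date -d "@${restore_ts}" '+%Y-%m-%d %H:%M:%S')

# Apply the point-in-time restore request (assumed spec layout, see note above).
kubectl apply -f - <<EOF
apiVersion: psmdb.percona.com/v1
kind: PerconaServerMongoDBRestore
metadata:
  name: restore-backup-minio-2
spec:
  clusterName: some-name
  backupName: backup-minio-2
  pitr:
    type: date
    date: "${restore_date}"
EOF

Once applied, the operator drives the object through the "requested" and then
"ready" states, which is exactly what the two wait_restore calls above poll for.
-----------------------------------------------------------------------------------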
+ wait_for_running some-name-cfg 3 + local name=some-name-cfg + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=cfg + local cluster_name=some-name ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod some-name-cfg-0 + local pod=some-name-cfg-0 + set +o xtrace waiting for pod/some-name-cfg-0 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod some-name-cfg-1 + local pod=some-name-cfg-1 + set +o xtrace waiting for pod/some-name-cfg-1 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.E8dAFKOWho +++ mktemp ++ local LAST_ERR=/tmp/tmp.bgFVYeogCa ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.E8dAFKOWho ++ cat /tmp/tmp.bgFVYeogCa ++ rm /tmp/tmp.E8dAFKOWho /tmp/tmp.bgFVYeogCa ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-cfg-2 + local pod=some-name-cfg-2 + set +o xtrace waiting for pod/some-name-cfg-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.775liFbwUj +++ mktemp ++ local LAST_ERR=/tmp/tmp.1WHak4wXDr ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.775liFbwUj ++ cat /tmp/tmp.1WHak4wXDr ++ rm /tmp/tmp.775liFbwUj /tmp/tmp.1WHak4wXDr ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Fft11mXCoc +++ mktemp ++ local LAST_ERR=/tmp/tmp.kp2Ds6cO2i ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Fft11mXCoc ++ cat /tmp/tmp.kp2Ds6cO2i ++ rm /tmp/tmp.Fft11mXCoc /tmp/tmp.kp2Ds6cO2i ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness + wait_for_running some-name-mongos 3 + local name=some-name-mongos + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=mongos + local cluster_name=some-name ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod some-name-mongos-0 + local pod=some-name-mongos-0 + set +o xtrace waiting for pod/some-name-mongos-0 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod some-name-mongos-1 + local pod=some-name-mongos-1 + set +o xtrace waiting for pod/some-name-mongos-1 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.qMolMW5z7C +++ mktemp ++ local LAST_ERR=/tmp/tmp.He4zK0v3m4 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].arbiter.enabled}' ++ 
exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.qMolMW5z7C ++ cat /tmp/tmp.He4zK0v3m4 ++ rm /tmp/tmp.qMolMW5z7C /tmp/tmp.He4zK0v3m4 ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-mongos-2 + local pod=some-name-mongos-2 + set +o xtrace waiting for pod/some-name-mongos-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.JxrKtYTd4w +++ mktemp ++ local LAST_ERR=/tmp/tmp.w2hXcgy828 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.JxrKtYTd4w ++ cat /tmp/tmp.w2hXcgy828 ++ rm /tmp/tmp.JxrKtYTd4w /tmp/tmp.w2hXcgy828 ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.KqdGpSrwiT +++ mktemp ++ local LAST_ERR=/tmp/tmp.rJq9qf1eci ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.KqdGpSrwiT ++ cat /tmp/tmp.rJq9qf1eci ++ rm /tmp/tmp.KqdGpSrwiT /tmp/tmp.rJq9qf1eci ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness + sleep 10 + compare_mongos_cmd find myApp:myPass@some-name-mongos.pitr-physical-22757 -2nd + local command=find + local uri=myApp:myPass@some-name-mongos.pitr-physical-22757 + local postfix=-2nd + local suffix= + local database=myApp + local collection=test + local port=27017 + local tls=false + [[ false == \t\r\u\e ]] + mongos_command=run_mongos + log 'running db.test.command() in myApp' + set +o xtrace [2026-03-10T08:04:56+0000] running db.test.command() in myApp + run_mongos 'use myApp\n db.test.find()' myApp:myPass@some-name-mongos.pitr-physical-22757 mongodb '' '' 27017 + local 'command=use myApp\n db.test.find()' + grep -E -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' + local uri=myApp:myPass@some-name-mongos.pitr-physical-22757 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local port=27017 + local mongo_bin=mongo + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ echo .svc.cluster.local ++ awk -F: '{print $2}' + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.WjnRursycU +++ mktemp ++ local LAST_ERR=/tmp/tmp.1xwjgfk9V8 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.WjnRursycU ++ cat /tmp/tmp.1xwjgfk9V8 ++ rm /tmp/tmp.WjnRursycU /tmp/tmp.1xwjgfk9V8 ++ return 0 + local client_container=psmdb-client-bb8b97679-jsw4c + kubectl_bin exec psmdb-client-bb8b97679-jsw4c -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo 
mongodb://myApp:myPass@some-name-mongos.pitr-physical-22757.svc.cluster.local:27017/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.0799dL5nFS ++ mktemp + local LAST_ERR=/tmp/tmp.eLknLDWwek + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-bb8b97679-jsw4c -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.pitr-physical-22757.svc.cluster.local:27017/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.0799dL5nFS + cat /tmp/tmp.eLknLDWwek + rm /tmp/tmp.0799dL5nFS /tmp/tmp.eLknLDWwek + return 0 + [[ 0 -eq 0 ]] + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2266/e2e-tests/pitr-physical/compare/find-2nd.json /tmp/tmp.uSIEEO8Dq3/find-2nd + run_backup backup-minio 3 physical + local name=backup-minio + local idx=3 + local type=physical + desc 'run backup backup-minio-3' + set +o xtrace ----------------------------------------------------------------------------------- run backup backup-minio-3 ----------------------------------------------------------------------------------- + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2266/e2e-tests/pitr-physical/conf/backup-minio.yml + /usr/sbin/sed -e 's/name:/name: backup-minio-3/' + /usr/sbin/sed -e 's/type:/type: physical/' + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.HGNmgnT5pK ++ mktemp + local LAST_ERR=/tmp/tmp.a8VFoodo3q + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.HGNmgnT5pK perconaservermongodbbackup.psmdb.percona.com/backup-minio-3 created + cat /tmp/tmp.a8VFoodo3q + rm /tmp/tmp.HGNmgnT5pK /tmp/tmp.a8VFoodo3q + return 0 + wait_backup backup-minio-3 + local backup_name=backup-minio-3 + local target_state=ready + set +o xtrace waiting for backup-minio-3 to reach ready state............OK + sleep 5 + compare_latest_restorable_time some-name-rs0 backup-minio-3 + local cluster=some-name-rs0 + local backup_name=backup-minio-3 + local latest_restorable_time + local backup_time ++ get_latest_restorable_time some-name-rs0 ++ local cluster=some-name-rs0 ++ local first_timestamp ++ local second_timestamp ++ local retry=0 ++ [[ '' != '' ]] +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.siKbRlzkfJ ++++ mktemp +++ local LAST_ERR=/tmp/tmp.JxFzHotVFR +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.siKbRlzkfJ +++ cat /tmp/tmp.JxFzHotVFR +++ rm /tmp/tmp.siKbRlzkfJ /tmp/tmp.JxFzHotVFR +++ return 0 ++ first_timestamp=1773128777 ++ sleep 5 ++ [[ 1773128777 != '' ]] ++ [[ 1773128777 != \n\u\l\l ]] +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.uhH1w7ZV2j ++++ mktemp +++ local LAST_ERR=/tmp/tmp.KjZu9tNp8O +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.uhH1w7ZV2j +++ cat /tmp/tmp.KjZu9tNp8O +++ rm /tmp/tmp.uhH1w7ZV2j 
/tmp/tmp.KjZu9tNp8O +++ return 0 ++ second_timestamp=1773128777 ++ let retry+=1 ++ [[ 1 -gt 30 ]] ++ [[ 1773128777 != '' ]] ++ [[ 1773128777 != \n\u\l\l ]] ++ [[ 1773128777 == 1773128777 ]] ++ /usr/sbin/date -u -d @1773128777 +%Y-%m-%dT%H:%M:%SZ + latest_restorable_time=2026-03-10T07:46:17Z ++ get_latest_restorable_time_from_backup_object backup-minio-3 ++ local backup_name=backup-minio-3 ++ local latestRestorableTime ++ local retry=0 ++ [[ '' != '' ]] ++ sleep 5 +++ kubectl_bin get psmdb-backup backup-minio-3 -o 'jsonpath={.status.latestRestorableTime}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.SEIJ5aAJsX ++++ mktemp +++ local LAST_ERR=/tmp/tmp.M4Qa2LEuF6 +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get psmdb-backup backup-minio-3 -o 'jsonpath={.status.latestRestorableTime}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.SEIJ5aAJsX +++ cat /tmp/tmp.M4Qa2LEuF6 +++ rm /tmp/tmp.SEIJ5aAJsX /tmp/tmp.M4Qa2LEuF6 +++ return 0 ++ latestRestorableTime=2026-03-10T07:46:17Z ++ let retry+=1 ++ [[ 1 -gt 30 ]] ++ [[ 2026-03-10T07:46:17Z != '' ]] ++ [[ 2026-03-10T07:46:17Z != \n\u\l\l ]] ++ echo 2026-03-10T07:46:17Z + backup_time=2026-03-10T07:46:17Z + [[ 2026-03-10T07:46:17Z != \2\0\2\6\-\0\3\-\1\0\T\0\7\:\4\6\:\1\7\Z ]] + check_recovery backup-minio-3 latest '' -3rd some-name + local backup_name=backup-minio-3 + local restore_type=latest + local restore_date= + local cmp_postfix=-3rd + local cluster_name=some-name + local backupSource= ++ get_latest_oplog_chunk_ts some-name ++ local cluster=some-name +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json ++++ mktemp +++ local LAST_OUT=/tmp/tmp.ocyVEYTvPX ++++ mktemp +++ local LAST_ERR=/tmp/tmp.debd303kPj +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.ocyVEYTvPX +++ cat /tmp/tmp.debd303kPj +++ rm /tmp/tmp.ocyVEYTvPX /tmp/tmp.debd303kPj +++ return 0 ++ echo 1773128777 + local latest_ts=1773128777 + desc 'write more data before restore by latest' + set +o xtrace ----------------------------------------------------------------------------------- write more data before restore by latest ----------------------------------------------------------------------------------- + run_mongos 'use myApp\n db.test.insert({ x: 100501 })' myApp:myPass@some-name-mongos.pitr-physical-22757 + local 'command=use myApp\n db.test.insert({ x: 100501 })' + local uri=myApp:myPass@some-name-mongos.pitr-physical-22757 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local port=27017 + local mongo_bin=mongo ++ echo .svc.cluster.local ++ awk -F: '{print $2}' + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.7EWObyfTM7 +++ mktemp ++ local LAST_ERR=/tmp/tmp.VrhRm4mKgV ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.7EWObyfTM7 ++ cat /tmp/tmp.VrhRm4mKgV ++ rm /tmp/tmp.7EWObyfTM7 /tmp/tmp.VrhRm4mKgV ++ return 0 + local 
client_container=psmdb-client-bb8b97679-jsw4c + kubectl_bin exec psmdb-client-bb8b97679-jsw4c -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100501 })\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.pitr-physical-22757.svc.cluster.local:27017/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.3aBt0DTEKt ++ mktemp + local LAST_ERR=/tmp/tmp.e2YoHlXY0n + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-bb8b97679-jsw4c -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100501 })\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.pitr-physical-22757.svc.cluster.local:27017/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.3aBt0DTEKt Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-mongos.pitr-physical-22757.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb Implicit session: session { "id" : UUID("451a1f55-8331-4e5f-baa7-3b4b21b4b532") } Percona Server for MongoDB server version: v8.0.19-7 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.e2YoHlXY0n + rm /tmp/tmp.3aBt0DTEKt /tmp/tmp.e2YoHlXY0n + return 0 + [[ -n '' ]] + desc 'Restoring to latest' + set +o xtrace ----------------------------------------------------------------------------------- Restoring to latest ----------------------------------------------------------------------------------- ++ get_latest_oplog_chunk_ts some-name ++ local cluster=some-name +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.O625CysAO1 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.iJnLKLShlN +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.O625CysAO1 +++ cat /tmp/tmp.iJnLKLShlN +++ rm /tmp/tmp.O625CysAO1 /tmp/tmp.iJnLKLShlN +++ return 0 ++ echo 1773128777 + local current_ts=1773128777 + retries=0 + [[ 1773128777 -gt 1773128777 ]] + [[ 0 -gt 30 ]] ++ get_latest_oplog_chunk_ts some-name ++ local cluster=some-name +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.LMRy84wjtF ++++ mktemp +++ local LAST_ERR=/tmp/tmp.O2kATiaWsl +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.LMRy84wjtF +++ cat /tmp/tmp.O2kATiaWsl +++ rm /tmp/tmp.LMRy84wjtF /tmp/tmp.O2kATiaWsl +++ return 0 ++ echo 1773128777 + latest_ts=1773128777 + retries=1 ++ format_date 1773128777 ++ local timestamp=1773128777 +++ TZ=UTC +++ /usr/sbin/date -d@1773128777 '+%Y-%m-%d %H:%M:%S' ++ echo 2026-03-10 07:46:17 ++ format_date 1773128777 ++ local timestamp=1773128777 +++ TZ=UTC +++ /usr/sbin/date -d@1773128777 '+%Y-%m-%d %H:%M:%S' ++ echo 2026-03-10 07:46:17 + echo 'Waiting for last oplog chunk (2026-03-10 07:46:17) to be 120 seconds older than starting chunk (2026-03-10 07:46:17)' Waiting for last oplog chunk (2026-03-10 07:46:17) to be 120 seconds older than starting chunk (2026-03-10 07:46:17) + sleep 10 + [[ 1773128777 -gt 1773128777 ]] 
+ [[ 1 -gt 30 ]] ++ get_latest_oplog_chunk_ts some-name ++ local cluster=some-name +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json ++++ mktemp +++ local LAST_OUT=/tmp/tmp.3u7o7fcyOt ++++ mktemp +++ local LAST_ERR=/tmp/tmp.N8mXdS1r0Q +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.3u7o7fcyOt +++ cat /tmp/tmp.N8mXdS1r0Q +++ rm /tmp/tmp.3u7o7fcyOt /tmp/tmp.N8mXdS1r0Q +++ return 0 ++ echo 1773128777 + latest_ts=1773128777 + retries=2 ++ format_date 1773128777 ++ local timestamp=1773128777 +++ TZ=UTC +++ /usr/sbin/date -d@1773128777 '+%Y-%m-%d %H:%M:%S' ++ echo 2026-03-10 07:46:17 ++ format_date 1773128777 ++ local timestamp=1773128777 +++ TZ=UTC +++ /usr/sbin/date -d@1773128777 '+%Y-%m-%d %H:%M:%S' ++ echo 2026-03-10 07:46:17 + echo 'Waiting for last oplog chunk (2026-03-10 07:46:17) to be 120 seconds older than starting chunk (2026-03-10 07:46:17)' Waiting for last oplog chunk (2026-03-10 07:46:17) to be 120 seconds older than starting chunk (2026-03-10 07:46:17) + sleep 10 + [[ 1773128777 -gt 1773128777 ]] + [[ 2 -gt 30 ]] ++ get_latest_oplog_chunk_ts some-name ++ local cluster=some-name +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.lUTcGBHQjL ++++ mktemp +++ local LAST_ERR=/tmp/tmp.fgIRfcwHxQ +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.lUTcGBHQjL +++ cat /tmp/tmp.fgIRfcwHxQ +++ rm /tmp/tmp.lUTcGBHQjL /tmp/tmp.fgIRfcwHxQ +++ return 0 ++ echo 1773128777 + latest_ts=1773128777 + retries=3 ++ format_date 1773128777 ++ local timestamp=1773128777 +++ TZ=UTC +++ /usr/sbin/date -d@1773128777 '+%Y-%m-%d %H:%M:%S' ++ echo 2026-03-10 07:46:17 ++ format_date 1773128777 ++ local timestamp=1773128777 +++ TZ=UTC +++ /usr/sbin/date -d@1773128777 '+%Y-%m-%d %H:%M:%S' ++ echo 2026-03-10 07:46:17 + echo 'Waiting for last oplog chunk (2026-03-10 07:46:17) to be 120 seconds older than starting chunk (2026-03-10 07:46:17)' Waiting for last oplog chunk (2026-03-10 07:46:17) to be 120 seconds older than starting chunk (2026-03-10 07:46:17) + sleep 10 + [[ 1773128777 -gt 1773128777 ]] + [[ 3 -gt 30 ]] ++ get_latest_oplog_chunk_ts some-name ++ local cluster=some-name +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.zGXncvjaZm ++++ mktemp +++ local LAST_ERR=/tmp/tmp.jideBdiICG +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.zGXncvjaZm +++ cat /tmp/tmp.jideBdiICG +++ rm /tmp/tmp.zGXncvjaZm /tmp/tmp.jideBdiICG +++ return 0 ++ echo 1773128777 + latest_ts=1773128777 + retries=4 ++ format_date 1773128777 ++ local timestamp=1773128777 +++ TZ=UTC +++ /usr/sbin/date -d@1773128777 '+%Y-%m-%d %H:%M:%S' ++ echo 2026-03-10 07:46:17 ++ format_date 1773128777 ++ local timestamp=1773128777 +++ TZ=UTC +++ 
/usr/sbin/date -d@1773128777 '+%Y-%m-%d %H:%M:%S' ++ echo 2026-03-10 07:46:17 + echo 'Waiting for last oplog chunk (2026-03-10 07:46:17) to be 120 seconds older than starting chunk (2026-03-10 07:46:17)' Waiting for last oplog chunk (2026-03-10 07:46:17) to be 120 seconds older than starting chunk (2026-03-10 07:46:17) + sleep 10 + [[ 1773128777 -gt 1773128777 ]] + [[ 4 -gt 30 ]] ++ get_latest_oplog_chunk_ts some-name ++ local cluster=some-name +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.erP73xc9eK ++++ mktemp +++ local LAST_ERR=/tmp/tmp.FEsHdMMdD8 +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.erP73xc9eK +++ cat /tmp/tmp.FEsHdMMdD8 +++ rm /tmp/tmp.erP73xc9eK /tmp/tmp.FEsHdMMdD8 +++ return 0 ++ echo 1773128777 + latest_ts=1773128777 + retries=5 ++ format_date 1773128777 ++ local timestamp=1773128777 +++ TZ=UTC +++ /usr/sbin/date -d@1773128777 '+%Y-%m-%d %H:%M:%S' ++ echo 2026-03-10 07:46:17 ++ format_date 1773128777 ++ local timestamp=1773128777 +++ TZ=UTC +++ /usr/sbin/date -d@1773128777 '+%Y-%m-%d %H:%M:%S' ++ echo 2026-03-10 07:46:17 + echo 'Waiting for last oplog chunk (2026-03-10 07:46:17) to be 120 seconds older than starting chunk (2026-03-10 07:46:17)' Waiting for last oplog chunk (2026-03-10 07:46:17) to be 120 seconds older than starting chunk (2026-03-10 07:46:17) + sleep 10 + [[ 1773128777 -gt 1773128777 ]] + [[ 5 -gt 30 ]] ++ get_latest_oplog_chunk_ts some-name ++ local cluster=some-name +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.ILpDJs2loW ++++ mktemp +++ local LAST_ERR=/tmp/tmp.cmYWWG2ciJ +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.ILpDJs2loW +++ cat /tmp/tmp.cmYWWG2ciJ +++ rm /tmp/tmp.ILpDJs2loW /tmp/tmp.cmYWWG2ciJ +++ return 0 ++ echo 1773128777 + latest_ts=1773128777 + retries=6 ++ format_date 1773128777 ++ local timestamp=1773128777 +++ TZ=UTC +++ /usr/sbin/date -d@1773128777 '+%Y-%m-%d %H:%M:%S' ++ echo 2026-03-10 07:46:17 ++ format_date 1773128777 ++ local timestamp=1773128777 +++ TZ=UTC +++ /usr/sbin/date -d@1773128777 '+%Y-%m-%d %H:%M:%S' ++ echo 2026-03-10 07:46:17 + echo 'Waiting for last oplog chunk (2026-03-10 07:46:17) to be 120 seconds older than starting chunk (2026-03-10 07:46:17)' Waiting for last oplog chunk (2026-03-10 07:46:17) to be 120 seconds older than starting chunk (2026-03-10 07:46:17) + sleep 10 + [[ 1773128777 -gt 1773128777 ]] + [[ 6 -gt 30 ]] ++ get_latest_oplog_chunk_ts some-name ++ local cluster=some-name +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json ++++ mktemp +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' +++ local LAST_OUT=/tmp/tmp.nKanYDUvMr ++++ mktemp +++ local LAST_ERR=/tmp/tmp.g7P29THXBm +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat 
/tmp/tmp.nKanYDUvMr +++ cat /tmp/tmp.g7P29THXBm +++ rm /tmp/tmp.nKanYDUvMr /tmp/tmp.g7P29THXBm +++ return 0 ++ echo 1773128777 + latest_ts=1773128777 + retries=7 ++ format_date 1773128777 ++ local timestamp=1773128777 +++ TZ=UTC +++ /usr/sbin/date -d@1773128777 '+%Y-%m-%d %H:%M:%S' ++ echo 2026-03-10 07:46:17 ++ format_date 1773128777 ++ local timestamp=1773128777 +++ TZ=UTC +++ /usr/sbin/date -d@1773128777 '+%Y-%m-%d %H:%M:%S' ++ echo 2026-03-10 07:46:17 + echo 'Waiting for last oplog chunk (2026-03-10 07:46:17) to be 120 seconds older than starting chunk (2026-03-10 07:46:17)' Waiting for last oplog chunk (2026-03-10 07:46:17) to be 120 seconds older than starting chunk (2026-03-10 07:46:17) + sleep 10 + [[ 1773128777 -gt 1773128777 ]] + [[ 7 -gt 30 ]] ++ get_latest_oplog_chunk_ts some-name ++ local cluster=some-name +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.wjpC8ylTD5 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.fSv3elyUQR +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.wjpC8ylTD5 +++ cat /tmp/tmp.fSv3elyUQR +++ rm /tmp/tmp.wjpC8ylTD5 /tmp/tmp.fSv3elyUQR +++ return 0 ++ echo 1773128777 + latest_ts=1773128777 + retries=8 ++ format_date 1773128777 ++ local timestamp=1773128777 +++ TZ=UTC +++ /usr/sbin/date -d@1773128777 '+%Y-%m-%d %H:%M:%S' ++ echo 2026-03-10 07:46:17 ++ format_date 1773128777 ++ local timestamp=1773128777 +++ TZ=UTC +++ /usr/sbin/date -d@1773128777 '+%Y-%m-%d %H:%M:%S' ++ echo 2026-03-10 07:46:17 + echo 'Waiting for last oplog chunk (2026-03-10 07:46:17) to be 120 seconds older than starting chunk (2026-03-10 07:46:17)' Waiting for last oplog chunk (2026-03-10 07:46:17) to be 120 seconds older than starting chunk (2026-03-10 07:46:17) + sleep 10 + [[ 1773128777 -gt 1773128777 ]] + [[ 8 -gt 30 ]] ++ get_latest_oplog_chunk_ts some-name ++ local cluster=some-name +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.tnIbzrIXXt ++++ mktemp +++ local LAST_ERR=/tmp/tmp.dTknLnQBA5 +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.tnIbzrIXXt +++ cat /tmp/tmp.dTknLnQBA5 +++ rm /tmp/tmp.tnIbzrIXXt /tmp/tmp.dTknLnQBA5 +++ return 0 ++ echo 1773128777 + latest_ts=1773128777 + retries=9 ++ format_date 1773128777 ++ local timestamp=1773128777 +++ TZ=UTC +++ /usr/sbin/date -d@1773128777 '+%Y-%m-%d %H:%M:%S' ++ echo 2026-03-10 07:46:17 ++ format_date 1773128777 ++ local timestamp=1773128777 +++ TZ=UTC +++ /usr/sbin/date -d@1773128777 '+%Y-%m-%d %H:%M:%S' ++ echo 2026-03-10 07:46:17 + echo 'Waiting for last oplog chunk (2026-03-10 07:46:17) to be 120 seconds older than starting chunk (2026-03-10 07:46:17)' Waiting for last oplog chunk (2026-03-10 07:46:17) to be 120 seconds older than starting chunk (2026-03-10 07:46:17) + sleep 10 + [[ 1773128777 -gt 1773128777 ]] + [[ 9 -gt 30 ]] ++ get_latest_oplog_chunk_ts some-name ++ local cluster=some-name +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq 
'.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.x1hETJlqPV ++++ mktemp +++ local LAST_ERR=/tmp/tmp.Efnmd8dLF1 +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.x1hETJlqPV +++ cat /tmp/tmp.Efnmd8dLF1 +++ rm /tmp/tmp.x1hETJlqPV /tmp/tmp.Efnmd8dLF1 +++ return 0 ++ echo 1773130035 + latest_ts=1773130035 + retries=10 ++ format_date 1773130035 ++ local timestamp=1773130035 +++ TZ=UTC +++ /usr/sbin/date -d@1773130035 '+%Y-%m-%d %H:%M:%S' ++ echo 2026-03-10 08:07:15 ++ format_date 1773128777 ++ local timestamp=1773128777 +++ TZ=UTC +++ /usr/sbin/date -d@1773128777 '+%Y-%m-%d %H:%M:%S' ++ echo 2026-03-10 07:46:17 + echo 'Waiting for last oplog chunk (2026-03-10 08:07:15) to be 120 seconds older than starting chunk (2026-03-10 07:46:17)' Waiting for last oplog chunk (2026-03-10 08:07:15) to be 120 seconds older than starting chunk (2026-03-10 07:46:17) + sleep 10 + [[ 1773130035 -gt 1773128777 ]] + '[' -z '' ']' + desc 'check restore by latest' + set +o xtrace ----------------------------------------------------------------------------------- check restore by latest ----------------------------------------------------------------------------------- + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2266/e2e-tests/pitr-physical/conf/restore.yml + /usr/sbin/sed -e 's/name:/name: restore-backup-minio-3/' + /usr/sbin/sed -e 's/backupName:/backupName: backup-minio-3/' + /usr/sbin/sed -e /backupSource/,+8d + /usr/sbin/sed -e 's/pitrType:/type: latest/' + kubectl_bin apply -f - + '[' -z '' ']' ++ mktemp + /usr/sbin/sed -e /date:/d + local LAST_OUT=/tmp/tmp.J6hOCff3A0 ++ mktemp + local LAST_ERR=/tmp/tmp.WzvAgGwEDK + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.J6hOCff3A0 perconaservermongodbrestore.psmdb.percona.com/restore-backup-minio-3 created + cat /tmp/tmp.WzvAgGwEDK + rm /tmp/tmp.J6hOCff3A0 /tmp/tmp.WzvAgGwEDK + return 0 + wait_restore backup-minio-3 some-name requested 0 1200 + local backup_name=backup-minio-3 + local cluster_name=some-name + local target_state=requested + local wait_cluster_consistency=0 + local wait_time=1200 + local ok_if_ready=0 + set +o xtrace Waiting for the psmdb-restore/restore-backup-minio-3 object to be created.OK Waiting psmdb-restore/restore-backup-minio-3 to reach state "requested" ..............OK after 13 minutes + [[ 0 -eq 1 ]] + echo + wait_restore backup-minio-3 some-name ready 0 1600 + local backup_name=backup-minio-3 + local cluster_name=some-name + local target_state=ready + local wait_cluster_consistency=0 + local wait_time=1600 + local ok_if_ready=0 + set +o xtrace Waiting for the psmdb-restore/restore-backup-minio-3 object to be created.OK Waiting psmdb-restore/restore-backup-minio-3 to reach state "ready" ...OK after 2 minutes + [[ 0 -eq 1 ]] + echo + set -o xtrace + wait_for_running some-name-rs0 3 + local name=some-name-rs0 + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=some-name ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod some-name-rs0-0 + local pod=some-name-rs0-0 + set +o xtrace waiting for pod/some-name-rs0-0 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + 
wait_pod some-name-rs0-1 + local pod=some-name-rs0-1 + set +o xtrace waiting for pod/some-name-rs0-1 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.MCW2W9bSEe +++ mktemp ++ local LAST_ERR=/tmp/tmp.OcpxnUt2H3 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.MCW2W9bSEe ++ cat /tmp/tmp.OcpxnUt2H3 ++ rm /tmp/tmp.MCW2W9bSEe /tmp/tmp.OcpxnUt2H3 ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-rs0-2 + local pod=some-name-rs0-2 + set +o xtrace waiting for pod/some-name-rs0-2 to be ready..............OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.twlCiLcLWD +++ mktemp ++ local LAST_ERR=/tmp/tmp.zmSiEUs3Uy ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.twlCiLcLWD ++ cat /tmp/tmp.zmSiEUs3Uy ++ rm /tmp/tmp.twlCiLcLWD /tmp/tmp.zmSiEUs3Uy ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.a9SNOq8QAW +++ mktemp ++ local LAST_ERR=/tmp/tmp.SXGv8MBzLC ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.a9SNOq8QAW ++ cat /tmp/tmp.SXGv8MBzLC ++ rm /tmp/tmp.a9SNOq8QAW /tmp/tmp.SXGv8MBzLC ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness............ 
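-----------------------------------------------------------------------------------
The repeated wait loops throughout this trace all poll PBM for the end timestamp
of the newest PITR oplog chunk. Condensed into a standalone helper, this is a
sketch of what the test's get_latest_oplog_chunk_ts does; the pbm command and jq
filter are copied verbatim from the trace, while the kubectl_bin retry wrapper,
null handling, and log formatting are omitted:

# Print the .range.end of the most recent PITR chunk reported by PBM.
get_latest_oplog_chunk_ts() {
    local cluster=$1
    kubectl exec "${cluster}-rs0-0" -c backup-agent -- pbm status -o json \
        | jq '.backups.pitrChunks.pitrChunks | last | .range.end'
}

# Wait (up to 30 retries of 10s, matching the trace) until a chunk newer than
# the last write or restore target exists.
target=1773128572
retries=0
until [[ "$(get_latest_oplog_chunk_ts some-name)" -gt ${target} ]]; do
    retries=$((retries + 1))
    if [[ ${retries} -gt 30 ]]; then
        echo "timed out waiting for an oplog chunk newer than ${target}" >&2
        exit 1
    fi
    sleep 10
done

This is also why the loop above iterated roughly ten times before proceeding:
PBM only closes a new chunk periodically, so the end timestamp stays at
1773128777 until the next chunk (1773130035) lands.
-----------------------------------------------------------------------------------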
+ wait_for_running some-name-cfg 3 + local name=some-name-cfg + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=cfg + local cluster_name=some-name ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod some-name-cfg-0 + local pod=some-name-cfg-0 + set +o xtrace waiting for pod/some-name-cfg-0 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod some-name-cfg-1 + local pod=some-name-cfg-1 + set +o xtrace waiting for pod/some-name-cfg-1 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.MGQIxeHnTN +++ mktemp ++ local LAST_ERR=/tmp/tmp.EnZGM4jXCi ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.MGQIxeHnTN ++ cat /tmp/tmp.EnZGM4jXCi ++ rm /tmp/tmp.MGQIxeHnTN /tmp/tmp.EnZGM4jXCi ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-cfg-2 + local pod=some-name-cfg-2 + set +o xtrace waiting for pod/some-name-cfg-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.aSyMUgyLLz +++ mktemp ++ local LAST_ERR=/tmp/tmp.RnDSckrAsV ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.aSyMUgyLLz ++ cat /tmp/tmp.RnDSckrAsV ++ rm /tmp/tmp.aSyMUgyLLz /tmp/tmp.RnDSckrAsV ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.jdMm5oqwxD +++ mktemp ++ local LAST_ERR=/tmp/tmp.jnBrYAGJlQ ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.jdMm5oqwxD ++ cat /tmp/tmp.jnBrYAGJlQ ++ rm /tmp/tmp.jdMm5oqwxD /tmp/tmp.jnBrYAGJlQ ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness + wait_for_running some-name-mongos 3 + local name=some-name-mongos + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=mongos + local cluster_name=some-name ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod some-name-mongos-0 + local pod=some-name-mongos-0 + set +o xtrace waiting for pod/some-name-mongos-0 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod some-name-mongos-1 + local pod=some-name-mongos-1 + set +o xtrace waiting for pod/some-name-mongos-1 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.RQ7ZjqINpN +++ mktemp ++ local LAST_ERR=/tmp/tmp.3oDii4w3xy ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].arbiter.enabled}' ++ 
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 -a -n 1 ']'
++ break
++ cat /tmp/tmp.RQ7ZjqINpN
++ cat /tmp/tmp.3oDii4w3xy
++ rm /tmp/tmp.RQ7ZjqINpN /tmp/tmp.3oDii4w3xy
++ return 0
+ [[ '' == \t\r\u\e ]]
+ wait_pod some-name-mongos-2
+ local pod=some-name-mongos-2
+ set +o xtrace
waiting for pod/some-name-mongos-2 to be ready.OK
++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].nonvoting.enabled}'
+++ mktemp
++ local LAST_OUT=/tmp/tmp.dtgxU78IQ2
+++ mktemp
++ local LAST_ERR=/tmp/tmp.bv8Tn6afUB
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in $(seq 0 2)
++ set +e
++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].nonvoting.enabled}'
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 -a -n 1 ']'
++ break
++ cat /tmp/tmp.dtgxU78IQ2
++ cat /tmp/tmp.bv8Tn6afUB
++ rm /tmp/tmp.dtgxU78IQ2 /tmp/tmp.bv8Tn6afUB
++ return 0
+ [[ '' == \t\r\u\e ]]
++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].hidden.enabled}'
+++ mktemp
++ local LAST_OUT=/tmp/tmp.6Vu2tdZ7DO
+++ mktemp
++ local LAST_ERR=/tmp/tmp.P3dUTXvFmo
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in $(seq 0 2)
++ set +e
++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].hidden.enabled}'
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 -a -n 1 ']'
++ break
++ cat /tmp/tmp.6Vu2tdZ7DO
++ cat /tmp/tmp.P3dUTXvFmo
++ rm /tmp/tmp.6Vu2tdZ7DO /tmp/tmp.P3dUTXvFmo
++ return 0
+ [[ '' == \t\r\u\e ]]
+ sleep 10
+ [[ true == \t\r\u\e ]]
+ set +x
Waiting for cluster readyness
+ sleep 10
+ compare_mongos_cmd find myApp:myPass@some-name-mongos.pitr-physical-22757 -3rd
+ local command=find
+ local uri=myApp:myPass@some-name-mongos.pitr-physical-22757
+ local postfix=-3rd
+ local suffix=
+ local database=myApp
+ local collection=test
+ local port=27017
+ local tls=false
+ [[ false == \t\r\u\e ]]
+ mongos_command=run_mongos
+ log 'running db.test.command() in myApp'
+ set +o xtrace
[2026-03-10T08:28:25+0000] running db.test.command() in myApp
+ run_mongos 'use myApp\n db.test.find()' myApp:myPass@some-name-mongos.pitr-physical-22757 mongodb '' '' 27017
+ local 'command=use myApp\n db.test.find()'
+ local uri=myApp:myPass@some-name-mongos.pitr-physical-22757
+ local driver=mongodb
+ local suffix=.svc.cluster.local
+ local mongo_flag=
+ local port=27017
+ local mongo_bin=mongo
+ grep -E -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service'
+ /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/'
++ echo .svc.cluster.local
++ awk -F: '{print $2}'
+ suffix_port=
+ [[ -z '' ]]
+ suffix=.svc.cluster.local:27017
++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}'
+++ mktemp
++ local LAST_OUT=/tmp/tmp.DLxxWWgrzj
+++ mktemp
++ local LAST_ERR=/tmp/tmp.Smxc6CIr9t
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in $(seq 0 2)
++ set +e
++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}'
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 -a -n 1 ']'
++ break
++ cat /tmp/tmp.DLxxWWgrzj
++ cat /tmp/tmp.Smxc6CIr9t
++ rm /tmp/tmp.DLxxWWgrzj /tmp/tmp.Smxc6CIr9t
++ return 0
+ local client_container=psmdb-client-bb8b97679-jsw4c
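
run_mongos, traced next, execs the query into the psmdb-client pod, strips known noise lines and unstable tokens (ObjectIds, pod ordinals in hostnames), and compare_mongos_cmd then diffs the result against a stored fixture. Condensed into a single pipeline, with pod name, URI, and paths taken from this run and the grep noise filter abbreviated:

# Condensed sketch of the verification step traced below; not the literal
# helper from e2e-tests.
client=psmdb-client-bb8b97679-jsw4c
kubectl exec "$client" -- bash -c \
	'printf "use myApp\n db.test.find()\n" | mongo "mongodb://myApp:myPass@some-name-mongos.pitr-physical-22757.svc.cluster.local:27017/admin"' \
	| grep -E -v 'I NETWORK|W NETWORK|Percona Server for MongoDB|connecting to:|Implicit session:' \
	| sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' \
	> /tmp/tmp.uSIEEO8Dq3/find-3rd
diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2266/e2e-tests/pitr-physical/compare/find-3rd.json /tmp/tmp.uSIEEO8Dq3/find-3rd

A zero exit from the diff is what lets the test proceed to teardown; any drift in the collection contents after the point-in-time restore would surface here.
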
+ kubectl_bin exec psmdb-client-bb8b97679-jsw4c -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.pitr-physical-22757.svc.cluster.local:27017/admin '
++ mktemp
+ local LAST_OUT=/tmp/tmp.Qjv8SID7Iw
++ mktemp
+ local LAST_ERR=/tmp/tmp.NZJVEoLoGD
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ kubectl exec psmdb-client-bb8b97679-jsw4c -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.pitr-physical-22757.svc.cluster.local:27017/admin '
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.Qjv8SID7Iw
+ cat /tmp/tmp.NZJVEoLoGD
+ rm /tmp/tmp.Qjv8SID7Iw /tmp/tmp.NZJVEoLoGD
+ return 0
+ [[ 0 -eq 0 ]]
+ diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2266/e2e-tests/pitr-physical/compare/find-3rd.json /tmp/tmp.uSIEEO8Dq3/find-3rd
+ destroy pitr-physical-22757
+ local namespace=pitr-physical-22757
+ local ignore_logs=true
+ [[ 0 == 1 ]]
+ desc 'destroy cluster/operator and all other resources'
+ set +o xtrace
-----------------------------------------------------------------------------------
destroy cluster/operator and all other resources
-----------------------------------------------------------------------------------
+ '[' true == false ']'
+ delete_backups
+ desc 'Delete psmdb-backup'
+ set +o xtrace
-----------------------------------------------------------------------------------
Delete psmdb-backup
-----------------------------------------------------------------------------------
++ kubectl_bin get psmdb-backup --no-headers
++ wc -l
+++ mktemp
++ local LAST_OUT=/tmp/tmp.49dPlC8JaO
+++ mktemp
++ local LAST_ERR=/tmp/tmp.yF3PXRc4kd
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in $(seq 0 2)
++ set +e
++ kubectl get psmdb-backup --no-headers
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 -a -n 1 ']'
++ break
++ cat /tmp/tmp.49dPlC8JaO
++ cat /tmp/tmp.yF3PXRc4kd
++ rm /tmp/tmp.49dPlC8JaO /tmp/tmp.yF3PXRc4kd
++ return 0
+ '[' 2 '!=' 0 ']'
+ kubectl_bin get psmdb-backup
++ mktemp
+ local LAST_OUT=/tmp/tmp.QJaSngvWK3
++ mktemp
+ local LAST_ERR=/tmp/tmp.3LIaNXujDv
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ kubectl get psmdb-backup
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.QJaSngvWK3
NAME             CLUSTER     STORAGE   DESTINATION                                   TYPE       SIZE     STATUS   COMPLETED   AGE
backup-minio-2   some-name   minio     s3://operator-testing/2026-03-10T07:40:08Z    physical   6.13MB   ready    48m         48m
backup-minio-3   some-name   minio     s3://operator-testing/2026-03-10T08:05:01Z    physical   3.65MB   ready    23m         23m
+ cat /tmp/tmp.3LIaNXujDv
+ rm /tmp/tmp.QJaSngvWK3 /tmp/tmp.3LIaNXujDv
+ return 0
+ kubectl_bin delete psmdb-backup --all
++ mktemp
+ local LAST_OUT=/tmp/tmp.a2rGJpEFKb
++ mktemp
+ local LAST_ERR=/tmp/tmp.Es7hABK6JQ
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ kubectl delete psmdb-backup --all
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.a2rGJpEFKb
perconaservermongodbbackup.psmdb.percona.com "backup-minio-2" deleted from pitr-physical-22757 namespace
perconaservermongodbbackup.psmdb.percona.com "backup-minio-3" deleted from pitr-physical-22757 namespace
+ cat /tmp/tmp.Es7hABK6JQ
+ rm /tmp/tmp.a2rGJpEFKb /tmp/tmp.Es7hABK6JQ
+ return 0
+ delete_crd
+ desc 'get and delete old CRDs and RBAC'
+ set +o xtrace
-----------------------------------------------------------------------------------
get and delete old CRDs and RBAC
-----------------------------------------------------------------------------------
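
delete_crd, traced next, deletes the CRDs with --wait=false and then force-clears metadata.finalizers on any leftover custom resources so that CRD deletion cannot hang; the "doesn't have a resource type" errors in this run just mean the CRDs were already gone, which the trailing `:` no-op swallows. The core per-CRD pattern, reconstructed from the trace:

# Strip finalizers from any remaining CRs of one type (rows from kubectl get
# are "NAMESPACE NAME ..."), then wait for the CRD itself to disappear.
crd=perconaservermongodbbackups.psmdb.percona.com
kubectl get "$crd" --all-namespaces -o wide \
	| grep -v NAMESPACE \
	| xargs -L 1 sh -xc "kubectl patch $crd -n \$0 \$1 --type=merge -p '{\"metadata\":{\"finalizers\":[]}}'" \
	|| :	# tolerate "no resource type" once the CRD is already gone
kubectl wait --for=delete crd "$crd"

With GNU xargs, adding -r (--no-run-if-empty) would skip the patch entirely when no CRs remain, avoiding the stray `-n sh` invocation visible in the trace below, where sh -c falls back to "sh" as $0 on empty input.
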
+ kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2266/deploy/crd.yaml --ignore-not-found --wait=false
++ mktemp
+ local LAST_OUT=/tmp/tmp.QzKXNthBEJ
++ mktemp
+ local LAST_ERR=/tmp/tmp.ImK7zWdRdu
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2266/deploy/crd.yaml --ignore-not-found --wait=false
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.QzKXNthBEJ
customresourcedefinition.apiextensions.k8s.io "perconaservermongodbbackups.psmdb.percona.com" deleted
customresourcedefinition.apiextensions.k8s.io "perconaservermongodbrestores.psmdb.percona.com" deleted
customresourcedefinition.apiextensions.k8s.io "perconaservermongodbs.psmdb.percona.com" deleted
+ cat /tmp/tmp.ImK7zWdRdu
+ rm /tmp/tmp.QzKXNthBEJ /tmp/tmp.ImK7zWdRdu
+ return 0
++ yq eval .metadata.name /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2266/deploy/crd.yaml
++ grep -v '\-\-\-'
grep: warning: stray \ before -
grep: warning: stray \ before -
+ for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-')
+ kubectl get perconaservermongodbbackups.psmdb.percona.com --all-namespaces -o wide
+ grep -v NAMESPACE
+ xargs -L 1 sh -xc 'kubectl patch perconaservermongodbbackups.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"'
error: the server doesn't have a resource type "perconaservermongodbbackups"
+ kubectl patch perconaservermongodbbackups.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}'
error: the server doesn't have a resource type "perconaservermongodbbackups"
+ :
+ kubectl_bin wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com
++ mktemp
+ local LAST_OUT=/tmp/tmp.jXYDAb9Wc2
++ mktemp
+ local LAST_ERR=/tmp/tmp.LWq3EVOUxs
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ kubectl wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.jXYDAb9Wc2
+ cat /tmp/tmp.LWq3EVOUxs
+ rm /tmp/tmp.jXYDAb9Wc2 /tmp/tmp.LWq3EVOUxs
+ return 0
+ for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-')
+ kubectl get perconaservermongodbrestores.psmdb.percona.com --all-namespaces -o wide
+ grep -v NAMESPACE
+ xargs -L 1 sh -xc 'kubectl patch perconaservermongodbrestores.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"'
error: the server doesn't have a resource type "perconaservermongodbrestores"
+ kubectl patch perconaservermongodbrestores.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}'
error: the server doesn't have a resource type "perconaservermongodbrestores"
+ :
+ kubectl_bin wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com
++ mktemp
+ local LAST_OUT=/tmp/tmp.zmtXWOZYhw
++ mktemp
+ local LAST_ERR=/tmp/tmp.aXS7GU9bde
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ kubectl wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.zmtXWOZYhw
+ cat /tmp/tmp.aXS7GU9bde
+ rm /tmp/tmp.zmtXWOZYhw /tmp/tmp.aXS7GU9bde
+ return 0
+ for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-')
+ kubectl get perconaservermongodbs.psmdb.percona.com --all-namespaces -o wide
+ grep -v NAMESPACE
+ xargs -L 1 sh -xc 'kubectl patch perconaservermongodbs.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"'
error: the server doesn't have a resource type "perconaservermongodbs"
+ kubectl patch perconaservermongodbs.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}'
error: the server doesn't have a resource type "perconaservermongodbs"
+ :
+ kubectl_bin wait --for=delete crd perconaservermongodbs.psmdb.percona.com
++ mktemp
+ local LAST_OUT=/tmp/tmp.wNn19GUmPT
++ mktemp
+ local LAST_ERR=/tmp/tmp.xUiZ2tfrZU
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ kubectl wait --for=delete crd perconaservermongodbs.psmdb.percona.com
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.wNn19GUmPT
+ cat /tmp/tmp.xUiZ2tfrZU
+ rm /tmp/tmp.wNn19GUmPT /tmp/tmp.xUiZ2tfrZU
+ return 0
+ local rbac_yaml=rbac.yaml
+ '[' -n psmdb-operator ']'
+ rbac_yaml=cw-rbac.yaml
+ kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2266/deploy/cw-rbac.yaml --ignore-not-found
++ mktemp
+ local LAST_OUT=/tmp/tmp.68rrzCdGJF
++ mktemp
+ local LAST_ERR=/tmp/tmp.vqe3XALZuL
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2266/deploy/cw-rbac.yaml --ignore-not-found
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.68rrzCdGJF
clusterrole.rbac.authorization.k8s.io "percona-server-mongodb-operator" deleted
clusterrolebinding.rbac.authorization.k8s.io "service-account-percona-server-mongodb-operator" deleted
+ cat /tmp/tmp.vqe3XALZuL
+ rm /tmp/tmp.68rrzCdGJF /tmp/tmp.vqe3XALZuL
+ return 0
+ destroy_cert_manager
+ kubectl_bin delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml
++ mktemp
+ local LAST_OUT=/tmp/tmp.rL9OFzA4QX
++ mktemp
+ local LAST_ERR=/tmp/tmp.0wgFyOzhHa
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ kubectl delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml
+ exit_status=1
+ set -e
+ '[' 1 '!=' 0 -a -n 1 ']'
+ cat /tmp/tmp.rL9OFzA4QX
+ cat /tmp/tmp.0wgFyOzhHa
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": namespaces "cert-manager" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "challenges.acme.cert-manager.io" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "orders.acme.cert-manager.io" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificaterequests.cert-manager.io" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificates.cert-manager.io" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "clusterissuers.cert-manager.io" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "issuers.cert-manager.io" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": serviceaccounts "cert-manager-cainjector" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": serviceaccounts "cert-manager" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": serviceaccounts "cert-manager-webhook" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cainjector" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-orders" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cluster-view" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-view" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-edit" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found
"https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting 
"https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": services "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": services "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": services "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": deployments.apps "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": deployments.apps "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": deployments.apps "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": mutatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": validatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found + sleep 0 + for i in $(seq 0 2) + set +e + kubectl delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.rL9OFzA4QX + cat /tmp/tmp.0wgFyOzhHa Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": namespaces "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "challenges.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "orders.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io 
"certificaterequests.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificates.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "clusterissuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "issuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": serviceaccounts "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": serviceaccounts "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": serviceaccounts "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cluster-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-edit" not found Error from server 
(NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error 
when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": services "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": services "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": services "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": deployments.apps "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": deployments.apps "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": deployments.apps "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": mutatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": validatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found + sleep 4 + for i in $(seq 0 2) + set +e + kubectl delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.rL9OFzA4QX + cat /tmp/tmp.0wgFyOzhHa Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": namespaces "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "challenges.acme.cert-manager.io" not found 
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "orders.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificaterequests.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificates.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "clusterissuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "issuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": serviceaccounts "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": serviceaccounts "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": serviceaccounts "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cluster-view" not found Error from server 
(NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-edit" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting 
"https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": services "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": services "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": services "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": deployments.apps "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": deployments.apps "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": deployments.apps "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": mutatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": validatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found + sleep 8 + cat /tmp/tmp.rL9OFzA4QX + cat /tmp/tmp.0wgFyOzhHa Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": namespaces "cert-manager" not found 
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "challenges.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "orders.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificaterequests.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificates.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "clusterissuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "issuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": serviceaccounts "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": serviceaccounts "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": serviceaccounts "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from 
server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cluster-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-edit" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting 
"https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": services "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": services "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": services "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": deployments.apps "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": deployments.apps "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": deployments.apps "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": mutatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": validatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not 
found + rm /tmp/tmp.rL9OFzA4QX /tmp/tmp.0wgFyOzhHa + return 1 + true + '[' -n '' ']' + '[' -n psmdb-operator ']' + kubectl_bin delete --grace-period=0 --force=true namespace pitr-physical-22757 + rm -rf /tmp/tmp.uSIEEO8Dq3 + kubectl_bin delete --grace-period=0 --force=true namespace psmdb-operator ++ mktemp ++ mktemp + desc 'test passed' + set +o xtrace + local LAST_OUT=/tmp/tmp.QyYoKf0RSF ----------------------------------------------------------------------------------- test passed ----------------------------------------------------------------------------------- ++ mktemp + local LAST_OUT=/tmp/tmp.jGGdbJe1Dg ++ mktemp + local LAST_ERR=/tmp/tmp.OMrwhrjUqZ + local exit_status=0 + local timeout=4 + local LAST_ERR=/tmp/tmp.Hcw8lUnjxL + local exit_status=0 + local timeout=4 ++ seq 0 2 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete --grace-period=0 --force=true namespace pitr-physical-22757 + for i in $(seq 0 2) + set +e + kubectl delete --grace-period=0 --force=true namespace psmdb-operator
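
Teardown ends by force-deleting the test namespace and the operator namespace; the interleaved trace above suggests the two kubectl_bin invocations run concurrently. A minimal sketch of that final step under that assumption:

# Final cleanup as traced above: drop both namespaces without waiting for
# graceful termination; the parallelism is inferred from the interleaving.
kubectl delete --grace-period=0 --force=true namespace pitr-physical-22757 &
kubectl delete --grace-period=0 --force=true namespace psmdb-operator &
wait
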