Log: /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2269/e2e-tests/logs/pitr-sharded.log
Warning: version difference between client (1.36) and server (1.32) exceeds the supported minor version skew of +/-1
Warning: version difference between client (1.36) and server (1.32) exceeds the supported minor version skew of +/-1
Warning: version difference between client (1.36) and server (1.32) exceeds the supported minor version skew of +/-1
+ main
+ create_infra pitr-sharded-13591
+ local ns=pitr-sharded-13591
+ [[ 1 == 1 ]]
+ delete_crd
+ desc 'get and delete old CRDs and RBAC'
+ set +o xtrace
-----------------------------------------------------------------------------------
get and delete old CRDs and RBAC
-----------------------------------------------------------------------------------
+ kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2269/deploy/crd.yaml --ignore-not-found --wait=false
++ mktemp
+ local LAST_OUT=/tmp/tmp.o1D3Be0Hfs
++ mktemp
+ local LAST_ERR=/tmp/tmp.rAQIEjqxX0
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2269/deploy/crd.yaml --ignore-not-found --wait=false
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.o1D3Be0Hfs
customresourcedefinition.apiextensions.k8s.io "perconaservermongodbbackups.psmdb.percona.com" deleted
customresourcedefinition.apiextensions.k8s.io "perconaservermongodbrestores.psmdb.percona.com" deleted
customresourcedefinition.apiextensions.k8s.io "perconaservermongodbs.psmdb.percona.com" deleted
+ cat /tmp/tmp.rAQIEjqxX0
+ rm /tmp/tmp.o1D3Be0Hfs /tmp/tmp.rAQIEjqxX0
+ return 0
++ yq eval .metadata.name /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2269/deploy/crd.yaml
++ grep -v '\-\-\-'
grep: warning: stray \ before -
grep: warning: stray \ before -
+ for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-')
+ kubectl get perconaservermongodbbackups.psmdb.percona.com --all-namespaces -o wide
+ grep -v NAMESPACE
+ xargs -L 1 sh -xc 'kubectl patch perconaservermongodbbackups.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"'
No resources found
+ kubectl patch perconaservermongodbbackups.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}'
error: resource(s) were provided, but no name was specified
+ :
+ kubectl_bin wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com
++ mktemp
+ local LAST_OUT=/tmp/tmp.EYSSXzNPP1
++ mktemp
+ local LAST_ERR=/tmp/tmp.Mdh1breSey
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ kubectl wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.EYSSXzNPP1
+ cat /tmp/tmp.Mdh1breSey
+ rm /tmp/tmp.EYSSXzNPP1 /tmp/tmp.Mdh1breSey
+ return 0
+ for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-')
+ kubectl get perconaservermongodbrestores.psmdb.percona.com --all-namespaces -o wide
+ grep -v NAMESPACE
+ xargs -L 1 sh -xc 'kubectl patch perconaservermongodbrestores.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"'
error: the server doesn't have a resource type "perconaservermongodbrestores"
+ kubectl patch perconaservermongodbrestores.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}'
error: the server doesn't have a resource type "perconaservermongodbrestores"
+ :
+ kubectl_bin wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com
++ mktemp
+ local LAST_OUT=/tmp/tmp.bffELStG0p
++ mktemp
+ local LAST_ERR=/tmp/tmp.wBKHeHxV0F
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ kubectl wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.bffELStG0p
+ cat /tmp/tmp.wBKHeHxV0F
+ rm /tmp/tmp.bffELStG0p /tmp/tmp.wBKHeHxV0F
+ return 0
+ for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-')
+ kubectl get perconaservermongodbs.psmdb.percona.com --all-namespaces -o wide
+ grep -v NAMESPACE
+ xargs -L 1 sh -xc 'kubectl patch perconaservermongodbs.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"'
error: the server doesn't have a resource type "perconaservermongodbs"
+ kubectl patch perconaservermongodbs.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}'
error: the server doesn't have a resource type "perconaservermongodbs"
+ :
+ kubectl_bin wait --for=delete crd perconaservermongodbs.psmdb.percona.com
++ mktemp
+ local LAST_OUT=/tmp/tmp.ZWXo5UAfDb
++ mktemp
+ local LAST_ERR=/tmp/tmp.ZrekbQIMyE
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ kubectl wait --for=delete crd perconaservermongodbs.psmdb.percona.com
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.ZWXo5UAfDb
+ cat /tmp/tmp.ZrekbQIMyE
+ rm /tmp/tmp.ZWXo5UAfDb /tmp/tmp.ZrekbQIMyE
+ return 0
+ local rbac_yaml=rbac.yaml
+ '[' -n psmdb-operator ']'
+ rbac_yaml=cw-rbac.yaml
+ kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2269/deploy/cw-rbac.yaml --ignore-not-found
++ mktemp
+ local LAST_OUT=/tmp/tmp.lxYtABhwpC
++ mktemp
+ local LAST_ERR=/tmp/tmp.sMtmEI9YRI
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2269/deploy/cw-rbac.yaml --ignore-not-found
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.lxYtABhwpC
clusterrole.rbac.authorization.k8s.io "percona-server-mongodb-operator" deleted
clusterrolebinding.rbac.authorization.k8s.io "service-account-percona-server-mongodb-operator" deleted
+ cat /tmp/tmp.sMtmEI9YRI
+ rm /tmp/tmp.lxYtABhwpC /tmp/tmp.sMtmEI9YRI
+ return 0
+ check_crd_for_deletion PR-2269-20e73be3
+ local git_tag=PR-2269-20e73be3
++ curl -s https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/PR-2269-20e73be3/deploy/crd.yaml
++ /usr/sbin/sed s/---//g
++ yq eval .metadata.name
++ /usr/sbin/sed ':a;N;$!ba;s/\n/ /g'
+ for crd_name in $(curl -s https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/${git_tag}/deploy/crd.yaml | yq eval '.metadata.name' | $sed 's/---//g' | $sed ':a;N;$!ba;s/\n/ /g')
++ kubectl_bin get crd/null -o 'jsonpath={.status.conditions[-1].type}'
+++ mktemp
++ local LAST_OUT=/tmp/tmp.cdmYidn11U
+++ mktemp
++ local LAST_ERR=/tmp/tmp.wiBkOl74wN
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in $(seq 0 2)
++ set +e
++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}'
++ exit_status=1
++ set -e
++ '[' 1 '!=' 0 -a -n 1 ']'
++ cat /tmp/tmp.cdmYidn11U
++ cat /tmp/tmp.wiBkOl74wN
Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found
++ sleep 0
++ for i in $(seq 0 2)
++ set +e
++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}'
++ exit_status=1
++ set -e
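The delete_crd phase above is the harness's pattern for tearing down CRDs that may have stuck custom resources: delete the CRDs with --wait=false, strip the finalizers from any leftover custom resources so deletion cannot hang, then wait for each CRD to disappear. A minimal standalone sketch of that pattern for one CRD (the trailing || true is an illustrative stand-in for the harness's '+ :' no-op):

# Delete CRDs without waiting, then clear finalizers on leftover CRs so the delete can finish.
kubectl delete -f deploy/crd.yaml --ignore-not-found --wait=false
kubectl get perconaservermongodbbackups.psmdb.percona.com --all-namespaces -o wide \
    | grep -v NAMESPACE \
    | xargs -L 1 sh -xc 'kubectl patch perconaservermongodbbackups.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' \
    || true
kubectl wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com

Two quirks of the original are visible in the trace: grep -v '\-\-\-' triggers the "stray \ before -" warnings (grep -v -- '---' would not), and when xargs receives no input GNU xargs still runs the command once, so $0 inside sh -c falls back to its default value "sh"; that is why the log shows kubectl patch ... -n sh failing with "no name was specified" (xargs -r would skip the empty run).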
++ '[' 1 '!=' 0 -a -n 1 ']'
++ cat /tmp/tmp.cdmYidn11U
++ cat /tmp/tmp.wiBkOl74wN
Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found
++ sleep 4
++ for i in $(seq 0 2)
++ set +e
++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}'
++ exit_status=1
++ set -e
++ '[' 1 '!=' 0 -a -n 1 ']'
++ cat /tmp/tmp.cdmYidn11U
++ cat /tmp/tmp.wiBkOl74wN
Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found
++ sleep 8
++ cat /tmp/tmp.cdmYidn11U
++ cat /tmp/tmp.wiBkOl74wN
Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found
++ rm /tmp/tmp.cdmYidn11U /tmp/tmp.wiBkOl74wN
++ return 1
+ [[ '' == Terminating ]]
+ '[' -n psmdb-operator ']'
+ create_namespace psmdb-operator
+ local namespace=psmdb-operator
+ local skip_clean_namespace=
+ [[ 1 == 1 ]]
+ [[ -z '' ]]
+ destroy_chaos_mesh
++ helm list --all-namespaces --filter chaos-mesh
++ tail -n1
++ awk '-F ' '{print $2}'
++ sed s/NAMESPACE//
+ local chaos_mesh_ns=
+ desc 'destroy chaos-mesh'
+ set +o xtrace
-----------------------------------------------------------------------------------
destroy chaos-mesh
-----------------------------------------------------------------------------------
+ '[' -n '' ']'
++ kubectl get MutatingWebhookConfiguration
++ grep chaos-mesh
++ awk '{print $1}'
+ timeout 30 kubectl delete MutatingWebhookConfiguration
error: resource(s) were provided, but no name was specified
+ :
++ kubectl get ValidatingWebhookConfiguration
++ grep chaos-mesh
++ awk '{print $1}'
+ timeout 30 kubectl delete ValidatingWebhookConfiguration
error: resource(s) were provided, but no name was specified
+ :
++ kubectl get ValidatingWebhookConfiguration
++ grep validate-auth
++ awk '{print $1}'
+ timeout 30 kubectl delete ValidatingWebhookConfiguration
error: resource(s) were provided, but no name was specified
+ :
++ kubectl api-resources
++ grep chaos-mesh
++ awk '{print $1}'
++ kubectl get crd
++ awk '{print $1}'
++ grep chaos-mesh.org
+ timeout 30 kubectl delete crd
error: resource(s) were provided, but no name was specified
+ :
++ kubectl get clusterrolebinding
++ grep chaos-mesh
++ awk '{print $1}'
+ timeout 30 kubectl delete clusterrolebinding
error: resource(s) were provided, but no name was specified
+ :
++ kubectl get clusterrole
++ awk '{print $1}'
++ grep chaos-mesh
+ timeout 30 kubectl delete clusterrole
error: resource(s) were provided, but no name was specified
+ :
+ desc 'cleaned up all old namespaces'
+ set +o xtrace
-----------------------------------------------------------------------------------
cleaned up all old namespaces
-----------------------------------------------------------------------------------
+ kubectl_bin get ns
+ grep -E -v '^kube-|^default|Terminating|psmdb-operator|openshift|^gke-|^gmp-|^NAME'
+ '[' -n '' ']'
+ desc 'cleaned up old namespaces psmdb-operator'
+ awk '{print$1}'
+ set +o xtrace
-----------------------------------------------------------------------------------
cleaned up old namespaces psmdb-operator
-----------------------------------------------------------------------------------
++ mktemp
+ kubectl_bin delete namespace psmdb-operator --ignore-not-found
+ xargs kubectl delete ns
++ mktemp
+ local LAST_OUT=/tmp/tmp.2LbHv1dgYE
+ local LAST_OUT=/tmp/tmp.bpmEDw7mKV
++ mktemp
++ mktemp
+ local LAST_ERR=/tmp/tmp.CtsoVrW9wf
+ local exit_status=0
+ local LAST_ERR=/tmp/tmp.mkkhkFyWXk
+ local timeout=4
+ local exit_status=0
+ local timeout=4
++ seq 0 2
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ kubectl get ns
+ for i in $(seq 0 2)
+ set +e
+ kubectl delete namespace psmdb-operator --ignore-not-found
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.bpmEDw7mKV
+ cat /tmp/tmp.CtsoVrW9wf
+ rm /tmp/tmp.bpmEDw7mKV /tmp/tmp.CtsoVrW9wf
+ return 0
namespace "pitr-sharded-11570" deleted
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.2LbHv1dgYE
namespace "psmdb-operator" deleted
+ cat /tmp/tmp.mkkhkFyWXk
+ rm /tmp/tmp.2LbHv1dgYE /tmp/tmp.mkkhkFyWXk
+ return 0
+ kubectl_bin wait --for=delete namespace psmdb-operator
++ mktemp
+ local LAST_OUT=/tmp/tmp.ddycWai4Dg
++ mktemp
+ local LAST_ERR=/tmp/tmp.Yt1Pos1Cnv
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ kubectl wait --for=delete namespace psmdb-operator
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.ddycWai4Dg
+ cat /tmp/tmp.Yt1Pos1Cnv
+ rm /tmp/tmp.ddycWai4Dg /tmp/tmp.Yt1Pos1Cnv
+ return 0
+ desc 'create namespace psmdb-operator'
+ set +o xtrace
-----------------------------------------------------------------------------------
create namespace psmdb-operator
-----------------------------------------------------------------------------------
+ kubectl_bin create namespace psmdb-operator
++ mktemp
+ local LAST_OUT=/tmp/tmp.QqNxkYq6ou
++ mktemp
+ local LAST_ERR=/tmp/tmp.Y1dGxWzYk7
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ kubectl create namespace psmdb-operator
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.QqNxkYq6ou
namespace/psmdb-operator created
+ cat /tmp/tmp.Y1dGxWzYk7
+ rm /tmp/tmp.QqNxkYq6ou /tmp/tmp.Y1dGxWzYk7
+ return 0
+ set_kube_ctx psmdb-operator
+ local namespace=psmdb-operator
++ kubectl_bin config current-context
+++ mktemp
++ local LAST_OUT=/tmp/tmp.fLJaOfDsAP
+++ mktemp
++ local LAST_ERR=/tmp/tmp.JrlyqGmoO8
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in $(seq 0 2)
++ set +e
++ kubectl config current-context
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 -a -n 1 ']'
++ break
++ cat /tmp/tmp.fLJaOfDsAP
++ cat /tmp/tmp.JrlyqGmoO8
++ rm /tmp/tmp.fLJaOfDsAP /tmp/tmp.JrlyqGmoO8
++ return 0
+ kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-2269-20e73be3-5-cluster7 --namespace=psmdb-operator
++ mktemp
+ local LAST_OUT=/tmp/tmp.8yp9wAZD4N
++ mktemp
+ local LAST_ERR=/tmp/tmp.lAYXluCdeH
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-2269-20e73be3-5-cluster7 --namespace=psmdb-operator
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.8yp9wAZD4N
Context "gke_cloud-dev-112233_us-central1-a_jen-psmdb-2269-20e73be3-5-cluster7" modified.
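Almost every kubectl call in this log runs through the harness's kubectl_bin wrapper, whose shape is visible in the trace: capture stdout and stderr into mktemp files, retry up to three times (seq 0 2) with a growing sleep between failed attempts (the sleep 0 / sleep 4 / sleep 8 lines in the check_crd_for_deletion block above), and replay the captured output before returning. A sketch reconstructed from the trace; the exact retry guard and backoff arithmetic are assumptions, not the harness's verbatim source:

kubectl_bin() {
    # Retry kubectl up to 3 times, keeping stdout/stderr in temp files.
    local LAST_OUT LAST_ERR exit_status=0 timeout=4 i
    LAST_OUT=$(mktemp)
    LAST_ERR=$(mktemp)
    for i in $(seq 0 2); do
        set +e
        kubectl "$@" >"$LAST_OUT" 2>"$LAST_ERR"
        exit_status=$?
        set -e
        if [ "$exit_status" -eq 0 ]; then
            break
        fi
        cat "$LAST_OUT"
        cat "$LAST_ERR"
        sleep $((timeout * i)) # matches the observed sleep 0, sleep 4, sleep 8
    done
    # Replay captured output, clean up, and propagate the last exit code.
    cat "$LAST_OUT"
    cat "$LAST_ERR"
    rm "$LAST_OUT" "$LAST_ERR"
    return "$exit_status"
}

Callers either let the replayed output flow into the log or consume it via command substitution, which is what the ++-prefixed kubectl_bin invocations (for example the config current-context and get_operator_pod lookups) are doing.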
+ cat /tmp/tmp.lAYXluCdeH + rm /tmp/tmp.8yp9wAZD4N /tmp/tmp.lAYXluCdeH + return 0 + deploy_operator + desc 'start PSMDB operator: docker.io/perconalab/percona-server-mongodb-operator:PR-2269-20e73be3' + set +o xtrace ----------------------------------------------------------------------------------- start PSMDB operator: docker.io/perconalab/percona-server-mongodb-operator:PR-2269-20e73be3 ----------------------------------------------------------------------------------- + local cr_file + '[' -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2269/e2e-tests/pitr-sharded/conf/crd.yaml ']' + cr_file=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2269/deploy/crd.yaml + kubectl_bin apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2269/deploy/crd.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.TlrzvbksUp ++ mktemp + local LAST_ERR=/tmp/tmp.7xezZqweou + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2269/deploy/crd.yaml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.TlrzvbksUp customresourcedefinition.apiextensions.k8s.io/perconaservermongodbbackups.psmdb.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaservermongodbrestores.psmdb.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaservermongodbs.psmdb.percona.com serverside-applied + cat /tmp/tmp.7xezZqweou + rm /tmp/tmp.TlrzvbksUp /tmp/tmp.7xezZqweou + return 0 + '[' -n psmdb-operator ']' + apply_rbac cw-rbac + local operator_namespace=psmdb-operator + local rbac=cw-rbac + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2269/deploy/cw-rbac.yaml + sed -e 's^namespace: .*^namespace: psmdb-operator^' + kubectl_bin apply -n psmdb-operator -f - ++ mktemp + local LAST_OUT=/tmp/tmp.Ir95NvmMj4 ++ mktemp + local LAST_ERR=/tmp/tmp.gOU8veBB2Q + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -n psmdb-operator -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Ir95NvmMj4 clusterrole.rbac.authorization.k8s.io/percona-server-mongodb-operator created serviceaccount/percona-server-mongodb-operator created clusterrolebinding.rbac.authorization.k8s.io/service-account-percona-server-mongodb-operator created + cat /tmp/tmp.gOU8veBB2Q + rm /tmp/tmp.Ir95NvmMj4 /tmp/tmp.gOU8veBB2Q + return 0 + yq eval $'\n\t\t\t(.spec.template.spec.containers[].image = "docker.io/perconalab/percona-server-mongodb-operator:PR-2269-20e73be3") |\n\t\t\t((.. | select(.[] == "DISABLE_TELEMETRY")) |= .value="true") |\n\t\t\t((.. 
| select(.[] == "LOG_LEVEL")) |= .value="DEBUG")' /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2269/deploy/cw-operator.yaml + kubectl_bin apply -n psmdb-operator -f - ++ mktemp + local LAST_OUT=/tmp/tmp.elygYxXlCz ++ mktemp + local LAST_ERR=/tmp/tmp.R7LMLXWY0G + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -n psmdb-operator -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.elygYxXlCz deployment.apps/percona-server-mongodb-operator created + cat /tmp/tmp.R7LMLXWY0G + rm /tmp/tmp.elygYxXlCz /tmp/tmp.R7LMLXWY0G + return 0 + sleep 20 ++ get_operator_pod ++ kubectl_bin get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.RwPynJsYht +++ mktemp ++ local LAST_ERR=/tmp/tmp.dTStDe9fqf ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.RwPynJsYht ++ cat /tmp/tmp.dTStDe9fqf ++ rm /tmp/tmp.RwPynJsYht /tmp/tmp.dTStDe9fqf ++ return 0 + wait_operator_pod percona-server-mongodb-operator-dfddcf789-t24vg + local pod=percona-server-mongodb-operator-dfddcf789-t24vg + set +o xtrace waiting for pod/percona-server-mongodb-operator-dfddcf789-t24vg to be ready.OK + echo 'Print operator info from log' Print operator info from log + grep 'Manager starting up' ++ get_operator_pod ++ kubectl_bin get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.vmvqLH2Z73 +++ mktemp ++ local LAST_ERR=/tmp/tmp.2RmSERWWsw ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.vmvqLH2Z73 ++ cat /tmp/tmp.2RmSERWWsw ++ rm /tmp/tmp.vmvqLH2Z73 /tmp/tmp.2RmSERWWsw ++ return 0 + kubectl_bin logs -n psmdb-operator percona-server-mongodb-operator-dfddcf789-t24vg ++ mktemp + local LAST_OUT=/tmp/tmp.5xG9Wq4d4C ++ mktemp + local LAST_ERR=/tmp/tmp.q6ju1E0rgp + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl logs -n psmdb-operator percona-server-mongodb-operator-dfddcf789-t24vg + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.5xG9Wq4d4C + cat /tmp/tmp.q6ju1E0rgp + rm /tmp/tmp.5xG9Wq4d4C /tmp/tmp.q6ju1E0rgp + return 0 2026-04-23T09:53:53.052Z INFO setup Manager starting up {"gitCommit": "20e73be336bc9107fc60a16452f9669066123774", "gitBranch": "PR-2269-20e73be3", "buildTime": "", "goVersion": "go1.25.9", "os": "linux", "arch": "amd64"} + create_namespace pitr-sharded-13591 + local namespace=pitr-sharded-13591 + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + destroy_chaos_mesh ++ helm list --all-namespaces --filter chaos-mesh ++ sed s/NAMESPACE// ++ tail -n1 ++ awk '-F ' '{print $2}' + local chaos_mesh_ns= + desc 'destroy chaos-mesh' + set +o xtrace ----------------------------------------------------------------------------------- destroy chaos-mesh ----------------------------------------------------------------------------------- + '[' -n '' ']' ++ kubectl get MutatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print 
$1}' + timeout 30 kubectl delete MutatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ grep validate-auth ++ kubectl get ValidatingWebhookConfiguration ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ grep chaos-mesh ++ kubectl api-resources ++ awk '{print $1}' ++ kubectl get crd ++ grep chaos-mesh.org ++ awk '{print $1}' + timeout 30 kubectl delete crd error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrolebinding ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrolebinding error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrole ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrole error: resource(s) were provided, but no name was specified + : + desc 'cleaned up all old namespaces' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up all old namespaces ----------------------------------------------------------------------------------- + kubectl_bin get ns + grep -E -v '^kube-|^default|Terminating|psmdb-operator|openshift|^gke-|^gmp-|^NAME' + awk '{print$1}' + '[' -n '' ']' + desc 'cleaned up old namespaces pitr-sharded-13591' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up old namespaces pitr-sharded-13591 ----------------------------------------------------------------------------------- + kubectl_bin delete namespace pitr-sharded-13591 --ignore-not-found ++ mktemp + xargs kubectl delete ns + local LAST_OUT=/tmp/tmp.MDYz7qs0oT ++ mktemp ++ mktemp + local LAST_ERR=/tmp/tmp.Kg3b8CQh9a + local exit_status=0 + local timeout=4 ++ seq 0 2 + local LAST_OUT=/tmp/tmp.X1gNy6t5rl + for i in $(seq 0 2) + set +e + kubectl get ns ++ mktemp + local LAST_ERR=/tmp/tmp.r1MwfiY8OV + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete namespace pitr-sharded-13591 --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.MDYz7qs0oT + cat /tmp/tmp.Kg3b8CQh9a + rm /tmp/tmp.MDYz7qs0oT /tmp/tmp.Kg3b8CQh9a + return 0 error: resource(s) were provided, but no name was specified + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.X1gNy6t5rl + cat /tmp/tmp.r1MwfiY8OV + rm /tmp/tmp.X1gNy6t5rl /tmp/tmp.r1MwfiY8OV + return 0 + kubectl_bin wait --for=delete namespace pitr-sharded-13591 ++ mktemp + local LAST_OUT=/tmp/tmp.X3H4N4H5Yj ++ mktemp + local LAST_ERR=/tmp/tmp.Vk3YqCvrUm + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete namespace pitr-sharded-13591 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.X3H4N4H5Yj + cat /tmp/tmp.Vk3YqCvrUm + rm /tmp/tmp.X3H4N4H5Yj /tmp/tmp.Vk3YqCvrUm + return 0 + desc 'create namespace pitr-sharded-13591' + set +o xtrace ----------------------------------------------------------------------------------- create namespace pitr-sharded-13591 ----------------------------------------------------------------------------------- + kubectl_bin create namespace pitr-sharded-13591 ++ mktemp + local LAST_OUT=/tmp/tmp.uqzbCAlPWd ++ 
mktemp + local LAST_ERR=/tmp/tmp.xOdrQYYvKw + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl create namespace pitr-sharded-13591 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.uqzbCAlPWd namespace/pitr-sharded-13591 created + cat /tmp/tmp.xOdrQYYvKw + rm /tmp/tmp.uqzbCAlPWd /tmp/tmp.xOdrQYYvKw + return 0 + set_kube_ctx pitr-sharded-13591 + local namespace=pitr-sharded-13591 ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.yKFxdoBjFa +++ mktemp ++ local LAST_ERR=/tmp/tmp.NDBTlDWV6a ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.yKFxdoBjFa ++ cat /tmp/tmp.NDBTlDWV6a ++ rm /tmp/tmp.yKFxdoBjFa /tmp/tmp.NDBTlDWV6a ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-2269-20e73be3-5-cluster7 --namespace=pitr-sharded-13591 ++ mktemp + local LAST_OUT=/tmp/tmp.g0KpxA7NUq ++ mktemp + local LAST_ERR=/tmp/tmp.7bcMd9om6r + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-2269-20e73be3-5-cluster7 --namespace=pitr-sharded-13591 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.g0KpxA7NUq Context "gke_cloud-dev-112233_us-central1-a_jen-psmdb-2269-20e73be3-5-cluster7" modified. + cat /tmp/tmp.7bcMd9om6r + rm /tmp/tmp.g0KpxA7NUq /tmp/tmp.7bcMd9om6r + return 0 + deploy_minio + local cert_secret= + local service_name=minio-service + desc 'install MinIO: minio-service' + set +o xtrace ----------------------------------------------------------------------------------- install MinIO: minio-service ----------------------------------------------------------------------------------- + helm uninstall minio-service + : + helm repo remove minio "minio" has been removed from your repositories + helm repo add minio https://charts.min.io/ "minio" has been added to your repositories + local endpoint=http://minio-service:9000 + minio_args=('--version' '5.4.0' '--set' 'replicas=1' '--set' 'mode=standalone' '--set' 'resources.requests.memory=256Mi' '--set' 'rootUser=rootuser' '--set' 'rootPassword=rootpass123' '--set' 'users[0].accessKey=some-access-key' '--set' 'users[0].secretKey=some-secret-key' '--set' 'users[0].policy=consoleAdmin' '--set' 'service.type=ClusterIP' '--set' 'configPathmc=/tmp/' '--set' 'securityContext.enabled=false' '--set' 'persistence.size=2G' '--set' 'fullnameOverride=minio-service' '--set' 'serviceAccount.create=true' '--set' 'serviceAccount.name=minio-service-sa') + local minio_args + [[ -n '' ]] + retry 10 60 helm install minio-service --version 5.4.0 --set replicas=1 --set mode=standalone --set resources.requests.memory=256Mi --set rootUser=rootuser --set rootPassword=rootpass123 --set 'users[0].accessKey=some-access-key' --set 'users[0].secretKey=some-secret-key' --set 'users[0].policy=consoleAdmin' --set service.type=ClusterIP --set configPathmc=/tmp/ --set securityContext.enabled=false --set persistence.size=2G --set fullnameOverride=minio-service --set serviceAccount.create=true --set serviceAccount.name=minio-service-sa minio/minio + local max=10 + local delay=60 + shift 2 + local n=1 + helm install minio-service --version 5.4.0 --set replicas=1 --set mode=standalone --set resources.requests.memory=256Mi --set rootUser=rootuser --set rootPassword=rootpass123 --set 
'users[0].accessKey=some-access-key' --set 'users[0].secretKey=some-secret-key' --set 'users[0].policy=consoleAdmin' --set service.type=ClusterIP --set configPathmc=/tmp/ --set securityContext.enabled=false --set persistence.size=2G --set fullnameOverride=minio-service --set serviceAccount.create=true --set serviceAccount.name=minio-service-sa minio/minio NAME: minio-service LAST DEPLOYED: Thu Apr 23 09:54:32 2026 NAMESPACE: pitr-sharded-13591 STATUS: deployed REVISION: 1 TEST SUITE: None NOTES: MinIO can be accessed via port 9000 on the following DNS name from within your cluster: minio-service.pitr-sharded-13591.cluster.local To access MinIO from localhost, run the below commands: 1. export POD_NAME=$(kubectl get pods --namespace pitr-sharded-13591 -l "release=minio-service" -o jsonpath="{.items[0].metadata.name}") 2. kubectl port-forward $POD_NAME 9000 --namespace pitr-sharded-13591 Read more about port forwarding here: http://kubernetes.io/docs/user-guide/kubectl/kubectl_port-forward/ You can now access MinIO server on http://localhost:9000. Follow the below steps to connect to MinIO server with mc client: 1. Download the MinIO mc client - https://min.io/docs/minio/linux/reference/minio-mc.html#quickstart 2. export MC_HOST_minio-service-local=http://$(kubectl get secret --namespace pitr-sharded-13591 minio-service -o jsonpath="{.data.rootUser}" | base64 --decode):$(kubectl get secret --namespace pitr-sharded-13591 minio-service -o jsonpath="{.data.rootPassword}" | base64 --decode)@localhost:9000 3. mc ls minio-service-local ++ kubectl_bin get pods --selector=release=minio-service -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.a8RBdKXjv8 +++ mktemp ++ local LAST_ERR=/tmp/tmp.2IXU6tnAsD ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=release=minio-service -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.a8RBdKXjv8 ++ cat /tmp/tmp.2IXU6tnAsD ++ rm /tmp/tmp.a8RBdKXjv8 /tmp/tmp.2IXU6tnAsD ++ return 0 + local MINIO_POD=minio-service-6d5f646cdc-4flz8 + wait_pod minio-service-6d5f646cdc-4flz8 + local pod=minio-service-6d5f646cdc-4flz8 + set +o xtrace waiting for pod/minio-service-6d5f646cdc-4flz8 to be ready.OK + '[' -n psmdb-operator ']' + kubectl_bin create svc -n psmdb-operator externalname minio-service --external-name=minio-service.pitr-sharded-13591.svc.cluster.local --tcp=9000 service/minio-service created + create_minio_bucket operator-testing http://minio-service:9000 + local bucket=operator-testing + local endpoint=http://minio-service:9000 + kubectl_bin run -i --rm aws-cli --image=docker.io/perconalab/awscli --restart=Never -- bash -c $'AWS_ACCESS_KEY_ID=some-access-key \t\tAWS_SECRET_ACCESS_KEY=some-secret-key \t\tAWS_DEFAULT_REGION=us-east-1 /usr/bin/aws --no-verify-ssl --endpoint-url http://minio-service:9000 s3 mb s3://operator-testing' ++ mktemp + local LAST_OUT=/tmp/tmp.rw6v4idyvV ++ mktemp + local LAST_ERR=/tmp/tmp.i1JtvGJtWo + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl run -i --rm aws-cli --image=docker.io/perconalab/awscli --restart=Never -- bash -c $'AWS_ACCESS_KEY_ID=some-access-key \t\tAWS_SECRET_ACCESS_KEY=some-secret-key \t\tAWS_DEFAULT_REGION=us-east-1 /usr/bin/aws --no-verify-ssl --endpoint-url http://minio-service:9000 s3 mb s3://operator-testing' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.rw6v4idyvV make_bucket: 
operator-testing make_bucket: operator-testing pod "aws-cli" deleted from pitr-sharded-13591 namespace + cat /tmp/tmp.i1JtvGJtWo All commands and output from this session will be recorded in container logs, including credentials and sensitive information passed through the command prompt. If you don't see a command prompt, try pressing enter. warning: couldn't attach to pod/aws-cli, falling back to streaming logs: Internal error occurred: unable to upgrade connection: container aws-cli not found in pod aws-cli_pitr-sharded-13591 + rm /tmp/tmp.rw6v4idyvV /tmp/tmp.i1JtvGJtWo + return 0 + desc 'create secrets and start client' + set +o xtrace ----------------------------------------------------------------------------------- create secrets and start client ----------------------------------------------------------------------------------- + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2269/e2e-tests/conf/secrets.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2269/e2e-tests/conf/client.yml ++ mktemp + local LAST_OUT=/tmp/tmp.O8pT1FVqvy ++ mktemp + local LAST_ERR=/tmp/tmp.6COYRsU9Jt + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2269/e2e-tests/conf/secrets.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2269/e2e-tests/conf/client.yml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.O8pT1FVqvy secret/some-users created deployment.apps/psmdb-client created + cat /tmp/tmp.6COYRsU9Jt + rm /tmp/tmp.O8pT1FVqvy /tmp/tmp.6COYRsU9Jt + return 0 + apply_s3_storage_secrets + desc 'create secrets for cloud storages' + set +o xtrace ----------------------------------------------------------------------------------- create secrets for cloud storages ----------------------------------------------------------------------------------- + '[' -z '' ']' + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2269/e2e-tests/conf/minio-secret.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2269/e2e-tests/conf/cloud-secret.yml ++ mktemp + local LAST_OUT=/tmp/tmp.WgU3Uz850l ++ mktemp + local LAST_ERR=/tmp/tmp.VpRJfb0G3v + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2269/e2e-tests/conf/minio-secret.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2269/e2e-tests/conf/cloud-secret.yml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.WgU3Uz850l secret/minio-secret created secret/aws-s3-secret created secret/gcp-cs-secret created secret/azure-secret created secret/gcp-cs-sa-key-secret created + cat /tmp/tmp.VpRJfb0G3v + rm /tmp/tmp.WgU3Uz850l /tmp/tmp.VpRJfb0G3v + return 0 + desc 'create custom RuntimeClass' + set +o xtrace ----------------------------------------------------------------------------------- create custom RuntimeClass ----------------------------------------------------------------------------------- + version_gt 1.19 ++ echo '1.32 >= 1.19' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + '[' 0 -ne 1 ']' + /usr/sbin/sed s/docker/runc/g + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2269/e2e-tests/conf/container-rc.yaml + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.B9Ycy8YJol ++ mktemp + local LAST_ERR=/tmp/tmp.65LHELeZBr + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat 
/tmp/tmp.B9Ycy8YJol runtimeclass.node.k8s.io/container-rc unchanged + cat /tmp/tmp.65LHELeZBr + rm /tmp/tmp.B9Ycy8YJol /tmp/tmp.65LHELeZBr + return 0 + cluster=some-name + desc 'create first PSMDB cluster some-name' + set +o xtrace ----------------------------------------------------------------------------------- create first PSMDB cluster some-name ----------------------------------------------------------------------------------- + '[' 0 -eq 0 ']' + apply_cluster /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2269/e2e-tests/pitr-sharded/conf/some-name-rs0.yml + '[' -z '' ']' + cat_config /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2269/e2e-tests/pitr-sharded/conf/some-name-rs0.yml + kubectl_bin apply -f - + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2269/e2e-tests/pitr-sharded/conf/some-name-rs0.yml + yq eval '(.spec | select(.image == null)).image = "docker.io/perconalab/percona-server-mongodb-operator:main-mongod8.0"' ++ mktemp + yq eval '(.spec | select(has("initImage"))).initImage = "docker.io/perconalab/percona-server-mongodb-operator:PR-2269-20e73be3"' + yq eval '(.spec | select(has("pmm"))).pmm.image = "docker.io/percona/pmm-client:2.44.1-1"' + yq eval '.spec.upgradeOptions.apply="Never"' + yq eval '(.spec | select(has("backup"))).backup.image = "docker.io/perconalab/percona-server-mongodb-operator:main-backup"' + /usr/sbin/sed -e s/NAME_SPACE/pitr-sharded-13591/g + local LAST_OUT=/tmp/tmp.NnEWBfwjh2 ++ mktemp + local LAST_ERR=/tmp/tmp.8GJH9HknC1 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.NnEWBfwjh2 perconaservermongodb.psmdb.percona.com/some-name created + cat /tmp/tmp.8GJH9HknC1 + rm /tmp/tmp.NnEWBfwjh2 /tmp/tmp.8GJH9HknC1 + return 0 + desc 'check if all 3 Pods started' + set +o xtrace ----------------------------------------------------------------------------------- check if all 3 Pods started ----------------------------------------------------------------------------------- + wait_for_running some-name-rs0 3 + local name=some-name-rs0 + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=some-name ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod some-name-rs0-0 + local pod=some-name-rs0-0 + set +o xtrace waiting for pod/some-name-rs0-0 to be ready.....OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod some-name-rs0-1 + local pod=some-name-rs0-1 + set +o xtrace waiting for pod/some-name-rs0-1 to be ready......OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.RzqmeAPxn8 +++ mktemp ++ local LAST_ERR=/tmp/tmp.wM6LihgJfR ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.RzqmeAPxn8 ++ cat /tmp/tmp.wM6LihgJfR ++ rm /tmp/tmp.RzqmeAPxn8 /tmp/tmp.wM6LihgJfR ++ return 0 + [[ '' == true ]] + wait_pod some-name-rs0-2 + local pod=some-name-rs0-2 + set +o xtrace waiting for pod/some-name-rs0-2 to be ready......OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.RxmGbRhrBA +++ mktemp ++ local 
LAST_ERR=/tmp/tmp.93gfqvyDrF ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.RxmGbRhrBA ++ cat /tmp/tmp.93gfqvyDrF ++ rm /tmp/tmp.RxmGbRhrBA /tmp/tmp.93gfqvyDrF ++ return 0 + [[ '' == true ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.02j2jJEhwo +++ mktemp ++ local LAST_ERR=/tmp/tmp.eEAY7R3rV0 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.02j2jJEhwo ++ cat /tmp/tmp.eEAY7R3rV0 ++ rm /tmp/tmp.02j2jJEhwo /tmp/tmp.eEAY7R3rV0 ++ return 0 + [[ '' == true ]] + sleep 10 + [[ true == true ]] + set +x Waiting for cluster readyness..................................................... + wait_for_running some-name-cfg 3 false + local name=some-name-cfg + let last_pod=2 + local check_cluster_readyness=false + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=cfg + local cluster_name=some-name ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod some-name-cfg-0 + local pod=some-name-cfg-0 + set +o xtrace waiting for pod/some-name-cfg-0 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod some-name-cfg-1 + local pod=some-name-cfg-1 + set +o xtrace waiting for pod/some-name-cfg-1 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.pDKDcadM2t +++ mktemp ++ local LAST_ERR=/tmp/tmp.NreoGIlPVl ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.pDKDcadM2t ++ cat /tmp/tmp.NreoGIlPVl ++ rm /tmp/tmp.pDKDcadM2t /tmp/tmp.NreoGIlPVl ++ return 0 + [[ '' == true ]] + wait_pod some-name-cfg-2 + local pod=some-name-cfg-2 + set +o xtrace waiting for pod/some-name-cfg-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.u27herv2PL +++ mktemp ++ local LAST_ERR=/tmp/tmp.f0hjIQLaaF ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.u27herv2PL ++ cat /tmp/tmp.f0hjIQLaaF ++ rm /tmp/tmp.u27herv2PL /tmp/tmp.f0hjIQLaaF ++ return 0 + [[ '' == true ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.m87R1GvMPl +++ mktemp ++ local LAST_ERR=/tmp/tmp.2UayDYFNIO ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.m87R1GvMPl ++ cat /tmp/tmp.2UayDYFNIO ++ rm /tmp/tmp.m87R1GvMPl /tmp/tmp.2UayDYFNIO ++ return 0 + [[ '' == true 
]] + sleep 10 + [[ false == true ]] + wait_cluster_consistency some-name + local cluster_name=some-name + local wait_time=32 + retry=0 + sleep 7 + echo -n 'waiting for cluster readyness' waiting for cluster readyness++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.MXUCYZbpQ4 +++ mktemp ++ local LAST_ERR=/tmp/tmp.Bhu9j48W1b ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.MXUCYZbpQ4 ++ cat /tmp/tmp.Bhu9j48W1b ++ rm /tmp/tmp.MXUCYZbpQ4 /tmp/tmp.Bhu9j48W1b ++ return 0 + [[ ready == ready ]] + echo .OK .OK + desc 'check if statefulset created with expected config' + set +o xtrace ----------------------------------------------------------------------------------- check if statefulset created with expected config ----------------------------------------------------------------------------------- + compare_kubectl statefulset/some-name-rs0 + local resource=statefulset/some-name-rs0 + local postfix= + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2269/e2e-tests/pitr-sharded/compare/statefulset_some-name-rs0.yml + local new_result=/tmp/tmp.CfvgmtWHfZ/statefulset_some-name-rs0.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2269/e2e-tests/pitr-sharded/compare/statefulset_some-name-rs0-oc.yml ']' + kubectl_bin get -o yaml statefulset/some-name-rs0 + yq eval $'\n\t\t\tdel(.metadata.ownerReferences[].apiVersion) |\n\t\t\tdel(.metadata.managedFields) |\n\t\t\tdel(.. | select(has("creationTimestamp")).creationTimestamp) |\n\t\t\tdel(.. | select(has("namespace")).namespace) |\n\t\t\tdel(.. | select(has("uid")).uid) |\n\t\t\tdel(.metadata.resourceVersion) |\n\t\t\tdel(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) |\n\t\t\tdel(.metadata.selfLink) |\n\t\t\tdel(.metadata.annotations."cloud.google.com/neg") |\n\t\t\tdel(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") |\n\t\t\tdel(.. | select(has("image")).image) |\n\t\t\tdel(.. | select(has("clusterIP")).clusterIP) |\n\t\t\tdel(.. | select(has("clusterIPs")).clusterIPs) |\n\t\t\tdel(.. | select(has("dataSource")).dataSource) |\n\t\t\tdel(.. | select(has("procMount")).procMount) |\n\t\t\tdel(.. | select(has("storageClassName")).storageClassName) |\n\t\t\tdel(.. | select(has("finalizers")).finalizers) |\n\t\t\tdel(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") |\n\t\t\tdel(.. | select(has("volumeName")).volumeName) |\n\t\t\tdel(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") |\n\t\t\tdel(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") |\n\t\t\tdel(.spec.volumeMode) |\n\t\t\tdel(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") |\n\t\t\tdel(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") |\n\t\t\tdel(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") |\n\t\t\tdel(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") |\n\t\t\tdel(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") |\n\t\t\tdel(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) |\n\t\t\tdel(.. 
| select(has("healthCheckNodePort")).healthCheckNodePort) |\n\t\t\tdel(.. | select(has("nodePort")).nodePort) |\n\t\t\tdel(.status) |\n\t\t\t(.. | select(tag == "!!str")) |= sub("pitr-sharded-13591", "NAME_SPACE") |\n\t\t\tdel(.spec.volumeClaimTemplates[].apiVersion) |\n\t\t\tdel(.spec.volumeClaimTemplates[].kind) |\n\t\t\tdel(.spec.ipFamilies) |\n\t\t\tdel(.spec.ipFamilyPolicy) |\n\t\t\t(.. | select(. == "extensions/v1beta1")) = "apps/v1" |\n\t\t\t(.. | select(. == "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.QtVdtYc6bs ++ mktemp + local LAST_ERR=/tmp/tmp.Z5pepSH1JK + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get -o yaml statefulset/some-name-rs0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.QtVdtYc6bs + cat /tmp/tmp.Z5pepSH1JK + rm /tmp/tmp.QtVdtYc6bs /tmp/tmp.Z5pepSH1JK + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.CfvgmtWHfZ/statefulset_some-name-rs0.yml + version_gt 1.22 ++ echo '1.32 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.CfvgmtWHfZ/statefulset_some-name-rs0.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.CfvgmtWHfZ/statefulset_some-name-rs0.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2269/e2e-tests/pitr-sharded/compare/statefulset_some-name-rs0.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2269/e2e-tests/pitr-sharded/compare/statefulset_some-name-rs0.yml /tmp/tmp.CfvgmtWHfZ/statefulset_some-name-rs0.yml + log 'compare_kubectl: statefulset/some-name-rs0 OK' + set +o xtrace [2026-04-23T09:59:01+0000] compare_kubectl: statefulset/some-name-rs0 OK + compare_kubectl statefulset/some-name-rs1 + local resource=statefulset/some-name-rs1 + local postfix= + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2269/e2e-tests/pitr-sharded/compare/statefulset_some-name-rs1.yml + local new_result=/tmp/tmp.CfvgmtWHfZ/statefulset_some-name-rs1.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2269/e2e-tests/pitr-sharded/compare/statefulset_some-name-rs1-oc.yml ']' + kubectl_bin get -o yaml statefulset/some-name-rs1 + yq eval $'\n\t\t\tdel(.metadata.ownerReferences[].apiVersion) |\n\t\t\tdel(.metadata.managedFields) |\n\t\t\tdel(.. | select(has("creationTimestamp")).creationTimestamp) |\n\t\t\tdel(.. | select(has("namespace")).namespace) |\n\t\t\tdel(.. | select(has("uid")).uid) |\n\t\t\tdel(.metadata.resourceVersion) |\n\t\t\tdel(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) |\n\t\t\tdel(.metadata.selfLink) |\n\t\t\tdel(.metadata.annotations."cloud.google.com/neg") |\n\t\t\tdel(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") |\n\t\t\tdel(.. | select(has("image")).image) |\n\t\t\tdel(.. | select(has("clusterIP")).clusterIP) |\n\t\t\tdel(.. | select(has("clusterIPs")).clusterIPs) |\n\t\t\tdel(.. | select(has("dataSource")).dataSource) |\n\t\t\tdel(.. | select(has("procMount")).procMount) |\n\t\t\tdel(.. | select(has("storageClassName")).storageClassName) |\n\t\t\tdel(.. | select(has("finalizers")).finalizers) |\n\t\t\tdel(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") |\n\t\t\tdel(.. | select(has("volumeName")).volumeName) |\n\t\t\tdel(.. 
| select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") |\n\t\t\tdel(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") |\n\t\t\tdel(.spec.volumeMode) |\n\t\t\tdel(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") |\n\t\t\tdel(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") |\n\t\t\tdel(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") |\n\t\t\tdel(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") |\n\t\t\tdel(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") |\n\t\t\tdel(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) |\n\t\t\tdel(.. | select(has("healthCheckNodePort")).healthCheckNodePort) |\n\t\t\tdel(.. | select(has("nodePort")).nodePort) |\n\t\t\tdel(.status) |\n\t\t\t(.. | select(tag == "!!str")) |= sub("pitr-sharded-13591", "NAME_SPACE") |\n\t\t\tdel(.spec.volumeClaimTemplates[].apiVersion) |\n\t\t\tdel(.spec.volumeClaimTemplates[].kind) |\n\t\t\tdel(.spec.ipFamilies) |\n\t\t\tdel(.spec.ipFamilyPolicy) |\n\t\t\t(.. | select(. == "extensions/v1beta1")) = "apps/v1" |\n\t\t\t(.. | select(. == "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.qyD5UcWIGo ++ mktemp + local LAST_ERR=/tmp/tmp.ZTmNnEHRAq + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get -o yaml statefulset/some-name-rs1 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.qyD5UcWIGo + cat /tmp/tmp.ZTmNnEHRAq + rm /tmp/tmp.qyD5UcWIGo /tmp/tmp.ZTmNnEHRAq + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.CfvgmtWHfZ/statefulset_some-name-rs1.yml + version_gt 1.22 ++ echo '1.32 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.CfvgmtWHfZ/statefulset_some-name-rs1.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.CfvgmtWHfZ/statefulset_some-name-rs1.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2269/e2e-tests/pitr-sharded/compare/statefulset_some-name-rs1.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2269/e2e-tests/pitr-sharded/compare/statefulset_some-name-rs1.yml /tmp/tmp.CfvgmtWHfZ/statefulset_some-name-rs1.yml + log 'compare_kubectl: statefulset/some-name-rs1 OK' + set +o xtrace [2026-04-23T09:59:02+0000] compare_kubectl: statefulset/some-name-rs1 OK + compare_kubectl statefulset/some-name-rs2 + local resource=statefulset/some-name-rs2 + local postfix= + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2269/e2e-tests/pitr-sharded/compare/statefulset_some-name-rs2.yml + local new_result=/tmp/tmp.CfvgmtWHfZ/statefulset_some-name-rs2.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2269/e2e-tests/pitr-sharded/compare/statefulset_some-name-rs2-oc.yml ']' + kubectl_bin get -o yaml statefulset/some-name-rs2 + yq eval $'\n\t\t\tdel(.metadata.ownerReferences[].apiVersion) |\n\t\t\tdel(.metadata.managedFields) |\n\t\t\tdel(.. | select(has("creationTimestamp")).creationTimestamp) |\n\t\t\tdel(.. | select(has("namespace")).namespace) |\n\t\t\tdel(.. 
| select(has("uid")).uid) |\n\t\t\tdel(.metadata.resourceVersion) |\n\t\t\tdel(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) |\n\t\t\tdel(.metadata.selfLink) |\n\t\t\tdel(.metadata.annotations."cloud.google.com/neg") |\n\t\t\tdel(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") |\n\t\t\tdel(.. | select(has("image")).image) |\n\t\t\tdel(.. | select(has("clusterIP")).clusterIP) |\n\t\t\tdel(.. | select(has("clusterIPs")).clusterIPs) |\n\t\t\tdel(.. | select(has("dataSource")).dataSource) |\n\t\t\tdel(.. | select(has("procMount")).procMount) |\n\t\t\tdel(.. | select(has("storageClassName")).storageClassName) |\n\t\t\tdel(.. | select(has("finalizers")).finalizers) |\n\t\t\tdel(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") |\n\t\t\tdel(.. | select(has("volumeName")).volumeName) |\n\t\t\tdel(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") |\n\t\t\tdel(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") |\n\t\t\tdel(.spec.volumeMode) |\n\t\t\tdel(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") |\n\t\t\tdel(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") |\n\t\t\tdel(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") |\n\t\t\tdel(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") |\n\t\t\tdel(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") |\n\t\t\tdel(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) |\n\t\t\tdel(.. | select(has("healthCheckNodePort")).healthCheckNodePort) |\n\t\t\tdel(.. | select(has("nodePort")).nodePort) |\n\t\t\tdel(.status) |\n\t\t\t(.. | select(tag == "!!str")) |= sub("pitr-sharded-13591", "NAME_SPACE") |\n\t\t\tdel(.spec.volumeClaimTemplates[].apiVersion) |\n\t\t\tdel(.spec.volumeClaimTemplates[].kind) |\n\t\t\tdel(.spec.ipFamilies) |\n\t\t\tdel(.spec.ipFamilyPolicy) |\n\t\t\t(.. | select(. == "extensions/v1beta1")) = "apps/v1" |\n\t\t\t(.. | select(. 
== "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.BWDUPpo4CN ++ mktemp + local LAST_ERR=/tmp/tmp.kHXiauoab3 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get -o yaml statefulset/some-name-rs2 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.BWDUPpo4CN + cat /tmp/tmp.kHXiauoab3 + rm /tmp/tmp.BWDUPpo4CN /tmp/tmp.kHXiauoab3 + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.CfvgmtWHfZ/statefulset_some-name-rs2.yml + version_gt 1.22 ++ echo '1.32 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.CfvgmtWHfZ/statefulset_some-name-rs2.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.CfvgmtWHfZ/statefulset_some-name-rs2.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2269/e2e-tests/pitr-sharded/compare/statefulset_some-name-rs2.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2269/e2e-tests/pitr-sharded/compare/statefulset_some-name-rs2.yml /tmp/tmp.CfvgmtWHfZ/statefulset_some-name-rs2.yml + log 'compare_kubectl: statefulset/some-name-rs2 OK' + set +o xtrace [2026-04-23T09:59:03+0000] compare_kubectl: statefulset/some-name-rs2 OK + compare_kubectl statefulset/some-name-cfg + local resource=statefulset/some-name-cfg + local postfix= + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2269/e2e-tests/pitr-sharded/compare/statefulset_some-name-cfg.yml + local new_result=/tmp/tmp.CfvgmtWHfZ/statefulset_some-name-cfg.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2269/e2e-tests/pitr-sharded/compare/statefulset_some-name-cfg-oc.yml ']' + kubectl_bin get -o yaml statefulset/some-name-cfg + yq eval $'\n\t\t\tdel(.metadata.ownerReferences[].apiVersion) |\n\t\t\tdel(.metadata.managedFields) |\n\t\t\tdel(.. | select(has("creationTimestamp")).creationTimestamp) |\n\t\t\tdel(.. | select(has("namespace")).namespace) |\n\t\t\tdel(.. | select(has("uid")).uid) |\n\t\t\tdel(.metadata.resourceVersion) |\n\t\t\tdel(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) |\n\t\t\tdel(.metadata.selfLink) |\n\t\t\tdel(.metadata.annotations."cloud.google.com/neg") |\n\t\t\tdel(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") |\n\t\t\tdel(.. | select(has("image")).image) |\n\t\t\tdel(.. | select(has("clusterIP")).clusterIP) |\n\t\t\tdel(.. | select(has("clusterIPs")).clusterIPs) |\n\t\t\tdel(.. | select(has("dataSource")).dataSource) |\n\t\t\tdel(.. | select(has("procMount")).procMount) |\n\t\t\tdel(.. | select(has("storageClassName")).storageClassName) |\n\t\t\tdel(.. | select(has("finalizers")).finalizers) |\n\t\t\tdel(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") |\n\t\t\tdel(.. | select(has("volumeName")).volumeName) |\n\t\t\tdel(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") |\n\t\t\tdel(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") |\n\t\t\tdel(.spec.volumeMode) |\n\t\t\tdel(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") |\n\t\t\tdel(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") |\n\t\t\tdel(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") |\n\t\t\tdel(.. 
| select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") |\n\t\t\tdel(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") |\n\t\t\tdel(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) |\n\t\t\tdel(.. | select(has("healthCheckNodePort")).healthCheckNodePort) |\n\t\t\tdel(.. | select(has("nodePort")).nodePort) |\n\t\t\tdel(.status) |\n\t\t\t(.. | select(tag == "!!str")) |= sub("pitr-sharded-13591", "NAME_SPACE") |\n\t\t\tdel(.spec.volumeClaimTemplates[].apiVersion) |\n\t\t\tdel(.spec.volumeClaimTemplates[].kind) |\n\t\t\tdel(.spec.ipFamilies) |\n\t\t\tdel(.spec.ipFamilyPolicy) |\n\t\t\t(.. | select(. == "extensions/v1beta1")) = "apps/v1" |\n\t\t\t(.. | select(. == "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.r6hePt4q0A ++ mktemp + local LAST_ERR=/tmp/tmp.KQJKtaMTcE + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get -o yaml statefulset/some-name-cfg + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.r6hePt4q0A + cat /tmp/tmp.KQJKtaMTcE + rm /tmp/tmp.r6hePt4q0A /tmp/tmp.KQJKtaMTcE + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.CfvgmtWHfZ/statefulset_some-name-cfg.yml + version_gt 1.22 ++ bc -l ++ echo '1.32 >= 1.22' + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.CfvgmtWHfZ/statefulset_some-name-cfg.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.CfvgmtWHfZ/statefulset_some-name-cfg.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2269/e2e-tests/pitr-sharded/compare/statefulset_some-name-cfg.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2269/e2e-tests/pitr-sharded/compare/statefulset_some-name-cfg.yml /tmp/tmp.CfvgmtWHfZ/statefulset_some-name-cfg.yml + log 'compare_kubectl: statefulset/some-name-cfg OK' + set +o xtrace [2026-04-23T09:59:04+0000] compare_kubectl: statefulset/some-name-cfg OK + compare_kubectl statefulset/some-name-mongos '' + local resource=statefulset/some-name-mongos + local postfix= + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2269/e2e-tests/pitr-sharded/compare/statefulset_some-name-mongos.yml + local new_result=/tmp/tmp.CfvgmtWHfZ/statefulset_some-name-mongos.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2269/e2e-tests/pitr-sharded/compare/statefulset_some-name-mongos-oc.yml ']' + kubectl_bin get -o yaml statefulset/some-name-mongos ++ mktemp + yq eval $'\n\t\t\tdel(.metadata.ownerReferences[].apiVersion) |\n\t\t\tdel(.metadata.managedFields) |\n\t\t\tdel(.. | select(has("creationTimestamp")).creationTimestamp) |\n\t\t\tdel(.. | select(has("namespace")).namespace) |\n\t\t\tdel(.. | select(has("uid")).uid) |\n\t\t\tdel(.metadata.resourceVersion) |\n\t\t\tdel(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) |\n\t\t\tdel(.metadata.selfLink) |\n\t\t\tdel(.metadata.annotations."cloud.google.com/neg") |\n\t\t\tdel(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") |\n\t\t\tdel(.. | select(has("image")).image) |\n\t\t\tdel(.. | select(has("clusterIP")).clusterIP) |\n\t\t\tdel(.. | select(has("clusterIPs")).clusterIPs) |\n\t\t\tdel(.. | select(has("dataSource")).dataSource) |\n\t\t\tdel(.. | select(has("procMount")).procMount) |\n\t\t\tdel(.. | select(has("storageClassName")).storageClassName) |\n\t\t\tdel(.. 
| select(has("finalizers")).finalizers) |\n\t\t\tdel(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") |\n\t\t\tdel(.. | select(has("volumeName")).volumeName) |\n\t\t\tdel(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") |\n\t\t\tdel(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") |\n\t\t\tdel(.spec.volumeMode) |\n\t\t\tdel(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") |\n\t\t\tdel(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") |\n\t\t\tdel(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") |\n\t\t\tdel(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") |\n\t\t\tdel(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") |\n\t\t\tdel(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) |\n\t\t\tdel(.. | select(has("healthCheckNodePort")).healthCheckNodePort) |\n\t\t\tdel(.. | select(has("nodePort")).nodePort) |\n\t\t\tdel(.status) |\n\t\t\t(.. | select(tag == "!!str")) |= sub("pitr-sharded-13591", "NAME_SPACE") |\n\t\t\tdel(.spec.volumeClaimTemplates[].apiVersion) |\n\t\t\tdel(.spec.volumeClaimTemplates[].kind) |\n\t\t\tdel(.spec.ipFamilies) |\n\t\t\tdel(.spec.ipFamilyPolicy) |\n\t\t\t(.. | select(. == "extensions/v1beta1")) = "apps/v1" |\n\t\t\t(.. | select(. == "batch/v1beta1")) = "batch/v1" ' - + local LAST_OUT=/tmp/tmp.3KHxtPKkbN ++ mktemp + local LAST_ERR=/tmp/tmp.57sjVXYNB4 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get -o yaml statefulset/some-name-mongos + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.3KHxtPKkbN + cat /tmp/tmp.57sjVXYNB4 + rm /tmp/tmp.3KHxtPKkbN /tmp/tmp.57sjVXYNB4 + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.CfvgmtWHfZ/statefulset_some-name-mongos.yml + version_gt 1.22 ++ echo '1.32 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.CfvgmtWHfZ/statefulset_some-name-mongos.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.CfvgmtWHfZ/statefulset_some-name-mongos.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2269/e2e-tests/pitr-sharded/compare/statefulset_some-name-mongos.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2269/e2e-tests/pitr-sharded/compare/statefulset_some-name-mongos.yml /tmp/tmp.CfvgmtWHfZ/statefulset_some-name-mongos.yml + log 'compare_kubectl: statefulset/some-name-mongos OK' + set +o xtrace [2026-04-23T09:59:05+0000] compare_kubectl: statefulset/some-name-mongos OK + write_initial_data + desc 'create user myApp' + set +o xtrace ----------------------------------------------------------------------------------- create user myApp ----------------------------------------------------------------------------------- + run_mongos 'db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})' userAdmin:userAdmin123456@some-name-mongos.pitr-sharded-13591 + local 'command=db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})' + local uri=userAdmin:userAdmin123456@some-name-mongos.pitr-sharded-13591 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local port=27017 + local mongo_bin=mongo ++ echo 
.svc.cluster.local ++ awk -F: '{print $2}' + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Xa1N3ipfFD +++ mktemp ++ local LAST_ERR=/tmp/tmp.qhIw1VP1ku ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Xa1N3ipfFD ++ cat /tmp/tmp.qhIw1VP1ku ++ rm /tmp/tmp.Xa1N3ipfFD /tmp/tmp.qhIw1VP1ku ++ return 0 + local client_container=psmdb-client-bb8b97679-q9vph + kubectl_bin exec psmdb-client-bb8b97679-q9vph -- bash -c 'printf '\''db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})\n'\'' | mongo mongodb://userAdmin:userAdmin123456@some-name-mongos.pitr-sharded-13591.svc.cluster.local:27017/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.VyL8jkfuq6 ++ mktemp + local LAST_ERR=/tmp/tmp.OkphJCzjVl + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-bb8b97679-q9vph -- bash -c 'printf '\''db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})\n'\'' | mongo mongodb://userAdmin:userAdmin123456@some-name-mongos.pitr-sharded-13591.svc.cluster.local:27017/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.VyL8jkfuq6 Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-mongos.pitr-sharded-13591.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb Implicit session: session { "id" : UUID("033eb4ba-2d46-4473-8902-17a2ef93458f") } Percona Server for MongoDB server version: v8.0.20-8 WARNING: shell and server versions do not match Successfully added user: { "user" : "myApp", "roles" : [ { "db" : "myApp", "role" : "readWrite" } ] } bye + cat /tmp/tmp.OkphJCzjVl + rm /tmp/tmp.VyL8jkfuq6 /tmp/tmp.OkphJCzjVl + return 0 + sleep 2 + write_document + local cmp_postfix= + local sleep_value=0 + log 'write initial data, read from all' + set +o xtrace [2026-04-23T09:59:10+0000] write initial data, read from all + run_mongos 'use myApp\n db.test.insert({ x: 100500 })' myApp:myPass@some-name-mongos.pitr-sharded-13591 + local 'command=use myApp\n db.test.insert({ x: 100500 })' + local uri=myApp:myPass@some-name-mongos.pitr-sharded-13591 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local port=27017 + local mongo_bin=mongo ++ echo .svc.cluster.local ++ awk -F: '{print $2}' + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Bc9g4ig4eF +++ mktemp ++ local LAST_ERR=/tmp/tmp.642N3ZcgUu ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Bc9g4ig4eF ++ cat /tmp/tmp.642N3ZcgUu ++ rm /tmp/tmp.Bc9g4ig4eF /tmp/tmp.642N3ZcgUu ++ return 0 + local client_container=psmdb-client-bb8b97679-q9vph + kubectl_bin exec psmdb-client-bb8b97679-q9vph -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.pitr-sharded-13591.svc.cluster.local:27017/admin ' ++ mktemp + local 
LAST_OUT=/tmp/tmp.tlsVT2Joxt ++ mktemp + local LAST_ERR=/tmp/tmp.28TxrWddmW + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-bb8b97679-q9vph -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.pitr-sharded-13591.svc.cluster.local:27017/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.tlsVT2Joxt Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-mongos.pitr-sharded-13591.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb Implicit session: session { "id" : UUID("2fefb135-7160-474f-877e-ded6f13762c2") } Percona Server for MongoDB server version: v8.0.20-8 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.28TxrWddmW + rm /tmp/tmp.tlsVT2Joxt /tmp/tmp.28TxrWddmW + return 0 + sleep 0 + compare_mongos_cmd find myApp:myPass@some-name-mongos.pitr-sharded-13591 + local command=find + local uri=myApp:myPass@some-name-mongos.pitr-sharded-13591 + local postfix= + local suffix= + local database=myApp + local collection=test + local port=27017 + local tls=false + [[ false == true ]] + mongos_command=run_mongos + log 'running db.test.command() in myApp' + set +o xtrace [2026-04-23T09:59:13+0000] running db.test.command() in myApp + run_mongos 'use myApp\n db.test.find()' myApp:myPass@some-name-mongos.pitr-sharded-13591 mongodb '' '' 27017 + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-mongos.pitr-sharded-13591 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local port=27017 + local mongo_bin=mongo + grep -E -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ echo .svc.cluster.local ++ awk -F: '{print $2}' + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.SQFe7SObsd +++ mktemp ++ local LAST_ERR=/tmp/tmp.WtexkTUfuB ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.SQFe7SObsd ++ cat /tmp/tmp.WtexkTUfuB ++ rm /tmp/tmp.SQFe7SObsd /tmp/tmp.WtexkTUfuB ++ return 0 + local client_container=psmdb-client-bb8b97679-q9vph + kubectl_bin exec psmdb-client-bb8b97679-q9vph -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.pitr-sharded-13591.svc.cluster.local:27017/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.LYChHGZA2c ++ mktemp + local LAST_ERR=/tmp/tmp.LkEBp0ptWv + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-bb8b97679-q9vph -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.pitr-sharded-13591.svc.cluster.local:27017/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.LYChHGZA2c + cat /tmp/tmp.LkEBp0ptWv + rm /tmp/tmp.LYChHGZA2c /tmp/tmp.LkEBp0ptWv + return 0 + [[ 0 -eq 0 ]] 
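
Throughout this test, data checks go through the suite's run_mongos helper: it looks up the psmdb-client pod, pipes the query into the mongo shell over kubectl exec, strips noisy shell banners, and masks volatile fields (ObjectIds, namespaced hostnames) before diffing against a stored fixture. A minimal sketch of that pattern, condensed from the trace above (the NS namespace placeholder is illustrative):

    # locate the pod that ships the mongo shell
    client=$(kubectl get pods --selector=name=psmdb-client \
        -o 'jsonpath={.items[].metadata.name}')
    # run the query through mongos, drop banner noise, mask volatile fields
    kubectl exec "$client" -- bash -c \
        'printf "use myApp\n db.test.find()\n" | mongo "mongodb://myApp:myPass@some-name-mongos.NS.svc.cluster.local:27017/admin"' \
        | grep -Ev 'I NETWORK|Implicit session|versions do not match' \
        | sed -re 's/ObjectId\("[0-9a-f]+"\)//' >/tmp/find
    # any difference from the expected fixture fails the test
    diff e2e-tests/pitr-sharded/compare/find.json /tmp/find
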
+ diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2269/e2e-tests/pitr-sharded/compare/find.json /tmp/tmp.CfvgmtWHfZ/find + wait_backup_agent some-name-rs0-0 + local agent_pod=some-name-rs0-0 + set +o xtrace waiting for pbm-agent to be ready in some-name-rs0-0...2026-04-23T09:58:16.000+0000 I listening for the commands + wait_backup_agent some-name-rs0-1 + local agent_pod=some-name-rs0-1 + set +o xtrace waiting for pbm-agent to be ready in some-name-rs0-1...2026-04-23T09:58:17.000+0000 I listening for the commands + wait_backup_agent some-name-rs0-2 + local agent_pod=some-name-rs0-2 + set +o xtrace waiting for pbm-agent to be ready in some-name-rs0-2...2026-04-23T09:58:27.000+0000 I listening for the commands + wait_backup_agent some-name-rs1-0 + local agent_pod=some-name-rs1-0 + set +o xtrace waiting for pbm-agent to be ready in some-name-rs1-0...2026-04-23T09:58:16.000+0000 I listening for the commands + wait_backup_agent some-name-rs1-1 + local agent_pod=some-name-rs1-1 + set +o xtrace waiting for pbm-agent to be ready in some-name-rs1-1...2026-04-23T09:58:18.000+0000 I listening for the commands + wait_backup_agent some-name-rs1-2 + local agent_pod=some-name-rs1-2 + set +o xtrace waiting for pbm-agent to be ready in some-name-rs1-2...2026-04-23T09:58:28.000+0000 I listening for the commands + wait_backup_agent some-name-rs2-0 + local agent_pod=some-name-rs2-0 + set +o xtrace waiting for pbm-agent to be ready in some-name-rs2-0...2026-04-23T09:58:16.000+0000 I listening for the commands + wait_backup_agent some-name-rs2-1 + local agent_pod=some-name-rs2-1 + set +o xtrace waiting for pbm-agent to be ready in some-name-rs2-1...2026-04-23T09:58:19.000+0000 I listening for the commands + wait_backup_agent some-name-rs2-2 + local agent_pod=some-name-rs2-2 + set +o xtrace waiting for pbm-agent to be ready in some-name-rs2-2...2026-04-23T09:58:29.000+0000 I listening for the commands + local backup_name + '[' 0 -eq 0 ']' + backup_name=backup-minio + run_backup backup-minio 0 + local name=backup-minio + local idx=0 + desc 'run backup backup-minio-0' + set +o xtrace ----------------------------------------------------------------------------------- run backup backup-minio-0 ----------------------------------------------------------------------------------- + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2269/e2e-tests/pitr-sharded/conf/backup-minio.yml + /usr/sbin/sed -e 's/name:/name: backup-minio-0/' + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.CptHVv192J ++ mktemp + local LAST_ERR=/tmp/tmp.0moAdYdGWF + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.CptHVv192J perconaservermongodbbackup.psmdb.percona.com/backup-minio-0 created + cat /tmp/tmp.0moAdYdGWF + rm /tmp/tmp.CptHVv192J /tmp/tmp.0moAdYdGWF + return 0 + wait_backup backup-minio-0 + local backup_name=backup-minio-0 + local target_state=ready + set +o xtrace waiting for backup-minio-0 to reach ready state..................OK + sleep 5 + compare_latest_restorable_time some-name-rs0 backup-minio-0 + local cluster=some-name-rs0 + local backup_name=backup-minio-0 + local latest_restorable_time + local backup_time ++ get_latest_restorable_time some-name-rs0 ++ local cluster=some-name-rs0 ++ local first_timestamp ++ local second_timestamp ++ local retry=0 ++ [[ '' != '' ]] +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | 
.range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.FNqD58GVVF ++++ mktemp +++ local LAST_ERR=/tmp/tmp.e1wkbjVaBB +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.FNqD58GVVF +++ cat /tmp/tmp.e1wkbjVaBB +++ rm /tmp/tmp.FNqD58GVVF /tmp/tmp.e1wkbjVaBB +++ return 0 ++ first_timestamp=null ++ sleep 5 ++ [[ null != '' ]] ++ [[ null != null ]] ++ let retry+=1 ++ [[ 1 -gt 30 ]] ++ [[ null != '' ]] ++ [[ null != null ]] +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.wYRt1mDlQ7 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.WmyostXhnG +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.wYRt1mDlQ7 +++ cat /tmp/tmp.WmyostXhnG +++ rm /tmp/tmp.wYRt1mDlQ7 /tmp/tmp.WmyostXhnG +++ return 0 ++ first_timestamp=null ++ sleep 5 ++ [[ null != '' ]] ++ [[ null != null ]] ++ let retry+=1 ++ [[ 2 -gt 30 ]] ++ [[ null != '' ]] ++ [[ null != null ]] +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.vI4Z2bvCme ++++ mktemp +++ local LAST_ERR=/tmp/tmp.9C6UZWmGie +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.vI4Z2bvCme +++ cat /tmp/tmp.9C6UZWmGie +++ rm /tmp/tmp.vI4Z2bvCme /tmp/tmp.9C6UZWmGie +++ return 0 ++ first_timestamp=null ++ sleep 5 ++ [[ null != '' ]] ++ [[ null != null ]] ++ let retry+=1 ++ [[ 3 -gt 30 ]] ++ [[ null != '' ]] ++ [[ null != null ]] +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.XzBmLXJph0 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.p0BRXTORmZ +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.XzBmLXJph0 +++ cat /tmp/tmp.p0BRXTORmZ +++ rm /tmp/tmp.XzBmLXJph0 /tmp/tmp.p0BRXTORmZ +++ return 0 ++ first_timestamp=null ++ sleep 5 ++ [[ null != '' ]] ++ [[ null != null ]] ++ let retry+=1 ++ [[ 4 -gt 30 ]] ++ [[ null != '' ]] ++ [[ null != null ]] +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.ycBqvweY89 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.Jzn6GAiyZg +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.ycBqvweY89 +++ cat /tmp/tmp.Jzn6GAiyZg +++ rm /tmp/tmp.ycBqvweY89 /tmp/tmp.Jzn6GAiyZg +++ return 0 ++ first_timestamp=1776938429 ++ sleep 5 ++ [[ 1776938429 != '' ]] ++ [[ 1776938429 != null ]] +++ kubectl_bin exec some-name-rs0-0 -c 
backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.Qjtcq7aep7 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.o6rOulPH7t +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.Qjtcq7aep7 +++ cat /tmp/tmp.o6rOulPH7t +++ rm /tmp/tmp.Qjtcq7aep7 /tmp/tmp.o6rOulPH7t +++ return 0 ++ second_timestamp=1776938429 ++ let retry+=1 ++ [[ 5 -gt 30 ]] ++ [[ 1776938429 != '' ]] ++ [[ 1776938429 != null ]] ++ [[ 1776938429 == 1776938429 ]] ++ /usr/sbin/date -u -d @1776938429 +%Y-%m-%dT%H:%M:%SZ + latest_restorable_time=2026-04-23T10:00:29Z ++ get_latest_restorable_time_from_backup_object backup-minio-0 ++ local backup_name=backup-minio-0 ++ local latestRestorableTime ++ local retry=0 ++ [[ '' != '' ]] ++ sleep 5 +++ kubectl_bin get psmdb-backup backup-minio-0 -o 'jsonpath={.status.latestRestorableTime}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.T0nIE7HN8F ++++ mktemp +++ local LAST_ERR=/tmp/tmp.3I4VtJJNBs +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get psmdb-backup backup-minio-0 -o 'jsonpath={.status.latestRestorableTime}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.T0nIE7HN8F +++ cat /tmp/tmp.3I4VtJJNBs +++ rm /tmp/tmp.T0nIE7HN8F /tmp/tmp.3I4VtJJNBs +++ return 0 ++ latestRestorableTime=2026-04-23T10:00:29Z ++ let retry+=1 ++ [[ 1 -gt 30 ]] ++ [[ 2026-04-23T10:00:29Z != '' ]] ++ [[ 2026-04-23T10:00:29Z != null ]] ++ echo 2026-04-23T10:00:29Z + backup_time=2026-04-23T10:00:29Z + [[ 2026-04-23T10:00:29Z != 2026\-04\-23T10\:00\:29Z ]] + write_document -2nd + local cmp_postfix=-2nd + local sleep_value=0 + log 'write initial data, read from all' + set +o xtrace [2026-04-23T10:00:53+0000] write initial data, read from all + run_mongos 'use myApp\n db.test.insert({ x: 100500 })' myApp:myPass@some-name-mongos.pitr-sharded-13591 + local 'command=use myApp\n db.test.insert({ x: 100500 })' + local uri=myApp:myPass@some-name-mongos.pitr-sharded-13591 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local port=27017 + local mongo_bin=mongo ++ echo .svc.cluster.local ++ awk -F: '{print $2}' + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.HkSj8gFfFw +++ mktemp ++ local LAST_ERR=/tmp/tmp.EsiqOxITgG ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.HkSj8gFfFw ++ cat /tmp/tmp.EsiqOxITgG ++ rm /tmp/tmp.HkSj8gFfFw /tmp/tmp.EsiqOxITgG ++ return 0 + local client_container=psmdb-client-bb8b97679-q9vph + kubectl_bin exec psmdb-client-bb8b97679-q9vph -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.pitr-sharded-13591.svc.cluster.local:27017/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.hHwbkK7M8F ++ mktemp + local LAST_ERR=/tmp/tmp.BpkupIkJI3 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-bb8b97679-q9vph -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 
})\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.pitr-sharded-13591.svc.cluster.local:27017/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.hHwbkK7M8F Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-mongos.pitr-sharded-13591.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb Implicit session: session { "id" : UUID("b22e1fa3-f61c-4de2-8c63-03029f382d15") } Percona Server for MongoDB server version: v8.0.20-8 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.BpkupIkJI3 + rm /tmp/tmp.hHwbkK7M8F /tmp/tmp.BpkupIkJI3 + return 0 + sleep 0 + compare_mongos_cmd find myApp:myPass@some-name-mongos.pitr-sharded-13591 -2nd + local command=find + local uri=myApp:myPass@some-name-mongos.pitr-sharded-13591 + local postfix=-2nd + local suffix= + local database=myApp + local collection=test + local port=27017 + local tls=false + [[ false == true ]] + mongos_command=run_mongos + log 'running db.test.command() in myApp' + set +o xtrace [2026-04-23T10:00:56+0000] running db.test.command() in myApp + run_mongos 'use myApp\n db.test.find()' myApp:myPass@some-name-mongos.pitr-sharded-13591 mongodb '' '' 27017 + grep -E -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-mongos.pitr-sharded-13591 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local port=27017 + local mongo_bin=mongo + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ echo .svc.cluster.local ++ awk -F: '{print $2}' + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.G632IMDfO0 +++ mktemp ++ local LAST_ERR=/tmp/tmp.WNpSP9gqXc ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.G632IMDfO0 ++ cat /tmp/tmp.WNpSP9gqXc ++ rm /tmp/tmp.G632IMDfO0 /tmp/tmp.WNpSP9gqXc ++ return 0 + local client_container=psmdb-client-bb8b97679-q9vph + kubectl_bin exec psmdb-client-bb8b97679-q9vph -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.pitr-sharded-13591.svc.cluster.local:27017/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.ji3zJomM9f ++ mktemp + local LAST_ERR=/tmp/tmp.fFI3Bd9ada + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-bb8b97679-q9vph -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.pitr-sharded-13591.svc.cluster.local:27017/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.ji3zJomM9f + cat /tmp/tmp.fFI3Bd9ada + rm /tmp/tmp.ji3zJomM9f /tmp/tmp.fFI3Bd9ada + return 0 + [[ 0 -eq 0 ]] + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2269/e2e-tests/pitr-sharded/compare/find-2nd.json /tmp/tmp.CfvgmtWHfZ/find-2nd + sleep 2 ++ run_mongos 'new Date().getTime() / 1000' myApp:myPass@some-name-mongos.pitr-sharded-13591 mongodb '' --quiet 
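
Here the test captures its restore target from the server's own clock before calling check_recovery, which then compares that target against the newest PITR oplog chunk reported by pbm. Condensed from this trace, and assuming the pbm JSON layout shown in it, the two probes look roughly like:

    # restore target: epoch seconds from mongos (cut drops the fractional part)
    time_now=$(kubectl exec "$client" -- bash -c \
        'printf "new Date().getTime() / 1000\n" | mongo "$uri" --quiet' | cut -d. -f1)
    # end of the latest PITR chunk tracked by the backup agent
    latest_ts=$(kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json \
        | jq '.backups.pitrChunks.pitrChunks | last | .range.end')

Here $client and $uri stand for the client pod and mongos connection string resolved earlier in the trace.
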
++ local 'command=new Date().getTime() / 1000' ++ local uri=myApp:myPass@some-name-mongos.pitr-sharded-13591 ++ local driver=mongodb ++ local suffix=.svc.cluster.local ++ local mongo_flag=--quiet ++ grep -E -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' ++ local port=27017 ++ local mongo_bin=mongo ++ cut -d. -f1 +++ echo .svc.cluster.local +++ awk -F: '{print $2}' ++ suffix_port= ++ [[ -z '' ]] ++ suffix=.svc.cluster.local:27017 +++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.EqHlO95YT0 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.MeeQD9mB92 +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.EqHlO95YT0 +++ cat /tmp/tmp.MeeQD9mB92 +++ rm /tmp/tmp.EqHlO95YT0 /tmp/tmp.MeeQD9mB92 +++ return 0 ++ local client_container=psmdb-client-bb8b97679-q9vph ++ kubectl_bin exec psmdb-client-bb8b97679-q9vph -- bash -c 'printf '\''new Date().getTime() / 1000\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.pitr-sharded-13591.svc.cluster.local:27017/admin --quiet' +++ mktemp ++ local LAST_OUT=/tmp/tmp.gbfk65KjBY +++ mktemp ++ local LAST_ERR=/tmp/tmp.crl8xiKnvg ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl exec psmdb-client-bb8b97679-q9vph -- bash -c 'printf '\''new Date().getTime() / 1000\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.pitr-sharded-13591.svc.cluster.local:27017/admin --quiet' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.gbfk65KjBY ++ cat /tmp/tmp.crl8xiKnvg ++ rm /tmp/tmp.gbfk65KjBY /tmp/tmp.crl8xiKnvg ++ return 0 + time_now=1776938463 + check_recovery backup-minio-0 date 1776938463 -2nd some-name + local backup_name=backup-minio-0 + local restore_type=date + local restore_date=1776938463 + local cmp_postfix=-2nd + local cluster_name=some-name + local backupSource= ++ get_latest_oplog_chunk_ts some-name ++ local cluster=some-name +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.bds6x91o4z ++++ mktemp +++ local LAST_ERR=/tmp/tmp.XkzieQGoFX +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.bds6x91o4z +++ cat /tmp/tmp.XkzieQGoFX +++ rm /tmp/tmp.bds6x91o4z /tmp/tmp.XkzieQGoFX +++ return 0 ++ echo 1776938429 + local latest_ts=1776938429 + desc 'write more data before restore by date' + set +o xtrace ----------------------------------------------------------------------------------- write more data before restore by date ----------------------------------------------------------------------------------- + run_mongos 'use myApp\n db.test.insert({ x: 100501 })' myApp:myPass@some-name-mongos.pitr-sharded-13591 + local 'command=use myApp\n db.test.insert({ x: 100501 })' + local uri=myApp:myPass@some-name-mongos.pitr-sharded-13591 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local port=27017 + local mongo_bin=mongo ++ echo 
.svc.cluster.local ++ awk -F: '{print $2}' + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.VW8tVVImPL +++ mktemp ++ local LAST_ERR=/tmp/tmp.nhVZY1FaTj ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.VW8tVVImPL ++ cat /tmp/tmp.nhVZY1FaTj ++ rm /tmp/tmp.VW8tVVImPL /tmp/tmp.nhVZY1FaTj ++ return 0 + local client_container=psmdb-client-bb8b97679-q9vph + kubectl_bin exec psmdb-client-bb8b97679-q9vph -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100501 })\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.pitr-sharded-13591.svc.cluster.local:27017/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.dxQPAu6hza ++ mktemp + local LAST_ERR=/tmp/tmp.mFaIpH1shI + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-bb8b97679-q9vph -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100501 })\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.pitr-sharded-13591.svc.cluster.local:27017/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.dxQPAu6hza Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-mongos.pitr-sharded-13591.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb Implicit session: session { "id" : UUID("bb2e3d64-197d-4685-9b58-bc847953a5eb") } Percona Server for MongoDB server version: v8.0.20-8 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.mFaIpH1shI + rm /tmp/tmp.dxQPAu6hza /tmp/tmp.mFaIpH1shI + return 0 + [[ -n 1776938463 ]] ++ format_date 1776938463 ++ local timestamp=1776938463 +++ TZ=UTC +++ /usr/sbin/date -d@1776938463 '+%Y-%m-%d %H:%M:%S' ++ echo 2026-04-23 10:01:03 + desc 'Restoring to time 2026-04-23 10:01:03' + set +o xtrace ----------------------------------------------------------------------------------- Restoring to time 2026-04-23 10:01:03 ----------------------------------------------------------------------------------- + retries=0 + [[ 1776938429 -gt 1776938463 ]] + [[ 0 -gt 30 ]] ++ get_latest_oplog_chunk_ts some-name ++ local cluster=some-name +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.2M21sFkOK6 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.A5bq8ukWye +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.2M21sFkOK6 +++ cat /tmp/tmp.A5bq8ukWye +++ rm /tmp/tmp.2M21sFkOK6 /tmp/tmp.A5bq8ukWye +++ return 0 ++ echo 1776938429 + latest_ts=1776938429 + retries=1 ++ format_date 1776938429 ++ local timestamp=1776938429 +++ TZ=UTC +++ /usr/sbin/date -d@1776938429 '+%Y-%m-%d %H:%M:%S' ++ echo 2026-04-23 10:00:29 ++ format_date 1776938463 ++ local timestamp=1776938463 +++ TZ=UTC +++ /usr/sbin/date -d@1776938463 '+%Y-%m-%d %H:%M:%S' ++ echo 2026-04-23 10:01:03 + echo 'Waiting for last oplog chunk (2026-04-23 10:00:29) to be greater than restore target (2026-04-23 10:01:03)' Waiting for last oplog 
chunk (2026-04-23 10:00:29) to be greater than restore target (2026-04-23 10:01:03) + sleep 10 + [[ 1776938429 -gt 1776938463 ]] + [[ 1 -gt 30 ]] ++ get_latest_oplog_chunk_ts some-name ++ local cluster=some-name +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.H67KCqhzsO ++++ mktemp +++ local LAST_ERR=/tmp/tmp.USFrC9yw9F +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.H67KCqhzsO +++ cat /tmp/tmp.USFrC9yw9F +++ rm /tmp/tmp.H67KCqhzsO /tmp/tmp.USFrC9yw9F +++ return 0 ++ echo 1776938429 + latest_ts=1776938429 + retries=2 ++ format_date 1776938429 ++ local timestamp=1776938429 +++ TZ=UTC +++ /usr/sbin/date -d@1776938429 '+%Y-%m-%d %H:%M:%S' ++ echo 2026-04-23 10:00:29 ++ format_date 1776938463 ++ local timestamp=1776938463 +++ TZ=UTC +++ /usr/sbin/date -d@1776938463 '+%Y-%m-%d %H:%M:%S' ++ echo 2026-04-23 10:01:03 + echo 'Waiting for last oplog chunk (2026-04-23 10:00:29) to be greater than restore target (2026-04-23 10:01:03)' Waiting for last oplog chunk (2026-04-23 10:00:29) to be greater than restore target (2026-04-23 10:01:03) + sleep 10 + [[ 1776938429 -gt 1776938463 ]] + [[ 2 -gt 30 ]] ++ get_latest_oplog_chunk_ts some-name ++ local cluster=some-name +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.lgnZJfTpcF ++++ mktemp +++ local LAST_ERR=/tmp/tmp.hGSu0BWCG7 +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.lgnZJfTpcF +++ cat /tmp/tmp.hGSu0BWCG7 +++ rm /tmp/tmp.lgnZJfTpcF /tmp/tmp.hGSu0BWCG7 +++ return 0 ++ echo 1776938429 + latest_ts=1776938429 + retries=3 ++ format_date 1776938429 ++ local timestamp=1776938429 +++ TZ=UTC +++ /usr/sbin/date -d@1776938429 '+%Y-%m-%d %H:%M:%S' ++ echo 2026-04-23 10:00:29 ++ format_date 1776938463 ++ local timestamp=1776938463 +++ TZ=UTC +++ /usr/sbin/date -d@1776938463 '+%Y-%m-%d %H:%M:%S' ++ echo 2026-04-23 10:01:03 + echo 'Waiting for last oplog chunk (2026-04-23 10:00:29) to be greater than restore target (2026-04-23 10:01:03)' Waiting for last oplog chunk (2026-04-23 10:00:29) to be greater than restore target (2026-04-23 10:01:03) + sleep 10 + [[ 1776938429 -gt 1776938463 ]] + [[ 3 -gt 30 ]] ++ get_latest_oplog_chunk_ts some-name ++ local cluster=some-name +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.6JcNDjR2ig ++++ mktemp +++ local LAST_ERR=/tmp/tmp.G8yrU4CE7j +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.6JcNDjR2ig +++ cat /tmp/tmp.G8yrU4CE7j +++ rm /tmp/tmp.6JcNDjR2ig /tmp/tmp.G8yrU4CE7j +++ return 0 ++ echo 1776938429 + latest_ts=1776938429 + retries=4 ++ format_date 1776938429 ++ local timestamp=1776938429 +++ TZ=UTC +++ /usr/sbin/date -d@1776938429 '+%Y-%m-%d %H:%M:%S' ++ echo 
2026-04-23 10:00:29 ++ format_date 1776938463 ++ local timestamp=1776938463 +++ TZ=UTC +++ /usr/sbin/date -d@1776938463 '+%Y-%m-%d %H:%M:%S' ++ echo 2026-04-23 10:01:03 + echo 'Waiting for last oplog chunk (2026-04-23 10:00:29) to be greater than restore target (2026-04-23 10:01:03)' Waiting for last oplog chunk (2026-04-23 10:00:29) to be greater than restore target (2026-04-23 10:01:03) + sleep 10 + [[ 1776938429 -gt 1776938463 ]] + [[ 4 -gt 30 ]] ++ get_latest_oplog_chunk_ts some-name ++ local cluster=some-name +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json ++++ mktemp +++ local LAST_OUT=/tmp/tmp.XZj8aLYLAg ++++ mktemp +++ local LAST_ERR=/tmp/tmp.FnRGEHOstW +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.XZj8aLYLAg +++ cat /tmp/tmp.FnRGEHOstW +++ rm /tmp/tmp.XZj8aLYLAg /tmp/tmp.FnRGEHOstW +++ return 0 ++ echo 1776938429 + latest_ts=1776938429 + retries=5 ++ format_date 1776938429 ++ local timestamp=1776938429 +++ TZ=UTC +++ /usr/sbin/date -d@1776938429 '+%Y-%m-%d %H:%M:%S' ++ echo 2026-04-23 10:00:29 ++ format_date 1776938463 ++ local timestamp=1776938463 +++ TZ=UTC +++ /usr/sbin/date -d@1776938463 '+%Y-%m-%d %H:%M:%S' ++ echo 2026-04-23 10:01:03 + echo 'Waiting for last oplog chunk (2026-04-23 10:00:29) to be greater than restore target (2026-04-23 10:01:03)' Waiting for last oplog chunk (2026-04-23 10:00:29) to be greater than restore target (2026-04-23 10:01:03) + sleep 10 + [[ 1776938429 -gt 1776938463 ]] + [[ 5 -gt 30 ]] ++ get_latest_oplog_chunk_ts some-name ++ local cluster=some-name +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.m2DqZPwuFR ++++ mktemp +++ local LAST_ERR=/tmp/tmp.h21lH6xf9g +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.m2DqZPwuFR +++ cat /tmp/tmp.h21lH6xf9g +++ rm /tmp/tmp.m2DqZPwuFR /tmp/tmp.h21lH6xf9g +++ return 0 ++ echo 1776938429 + latest_ts=1776938429 + retries=6 ++ format_date 1776938429 ++ local timestamp=1776938429 +++ TZ=UTC +++ /usr/sbin/date -d@1776938429 '+%Y-%m-%d %H:%M:%S' ++ echo 2026-04-23 10:00:29 ++ format_date 1776938463 ++ local timestamp=1776938463 +++ TZ=UTC +++ /usr/sbin/date -d@1776938463 '+%Y-%m-%d %H:%M:%S' ++ echo 2026-04-23 10:01:03 + echo 'Waiting for last oplog chunk (2026-04-23 10:00:29) to be greater than restore target (2026-04-23 10:01:03)' Waiting for last oplog chunk (2026-04-23 10:00:29) to be greater than restore target (2026-04-23 10:01:03) + sleep 10 + [[ 1776938429 -gt 1776938463 ]] + [[ 6 -gt 30 ]] ++ get_latest_oplog_chunk_ts some-name ++ local cluster=some-name +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.qR4Tli5LCI ++++ mktemp +++ local LAST_ERR=/tmp/tmp.DF8dC42mwY +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' 
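
All of the retries above come from one wait loop: it re-reads the latest chunk end every 10 seconds and proceeds only once that timestamp passes the restore target, giving up after 30 attempts. A minimal sketch of the loop, assuming get_latest_oplog_chunk_ts wraps the pbm/jq probe shown earlier:

    format_date() { TZ=UTC date -d "@$1" '+%Y-%m-%d %H:%M:%S'; }

    retries=0
    until [[ $latest_ts -gt $target_ts ]]; do
        (( ++retries > 30 )) && { echo "PITR chunks never reached the target" >&2; exit 1; }
        echo "Waiting for last oplog chunk ($(format_date "$latest_ts")) to be greater than restore target ($(format_date "$target_ts"))"
        sleep 10
        latest_ts=$(get_latest_oplog_chunk_ts some-name)
    done
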
+++ break +++ cat /tmp/tmp.qR4Tli5LCI +++ cat /tmp/tmp.DF8dC42mwY +++ rm /tmp/tmp.qR4Tli5LCI /tmp/tmp.DF8dC42mwY +++ return 0 ++ echo 1776938429 + latest_ts=1776938429 + retries=7 ++ format_date 1776938429 ++ local timestamp=1776938429 +++ TZ=UTC +++ /usr/sbin/date -d@1776938429 '+%Y-%m-%d %H:%M:%S' ++ echo 2026-04-23 10:00:29 ++ format_date 1776938463 ++ local timestamp=1776938463 +++ TZ=UTC +++ /usr/sbin/date -d@1776938463 '+%Y-%m-%d %H:%M:%S' ++ echo 2026-04-23 10:01:03 + echo 'Waiting for last oplog chunk (2026-04-23 10:00:29) to be greater than restore target (2026-04-23 10:01:03)' Waiting for last oplog chunk (2026-04-23 10:00:29) to be greater than restore target (2026-04-23 10:01:03) + sleep 10 + [[ 1776938429 -gt 1776938463 ]] + [[ 7 -gt 30 ]] ++ get_latest_oplog_chunk_ts some-name ++ local cluster=some-name +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json ++++ mktemp +++ local LAST_OUT=/tmp/tmp.MFNMBQ9aPN ++++ mktemp +++ local LAST_ERR=/tmp/tmp.VtXUqMsqwY +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.MFNMBQ9aPN +++ cat /tmp/tmp.VtXUqMsqwY +++ rm /tmp/tmp.MFNMBQ9aPN /tmp/tmp.VtXUqMsqwY +++ return 0 ++ echo 1776938429 + latest_ts=1776938429 + retries=8 ++ format_date 1776938429 ++ local timestamp=1776938429 +++ TZ=UTC +++ /usr/sbin/date -d@1776938429 '+%Y-%m-%d %H:%M:%S' ++ echo 2026-04-23 10:00:29 ++ format_date 1776938463 ++ local timestamp=1776938463 +++ TZ=UTC +++ /usr/sbin/date -d@1776938463 '+%Y-%m-%d %H:%M:%S' ++ echo 2026-04-23 10:01:03 + echo 'Waiting for last oplog chunk (2026-04-23 10:00:29) to be greater than restore target (2026-04-23 10:01:03)' Waiting for last oplog chunk (2026-04-23 10:00:29) to be greater than restore target (2026-04-23 10:01:03) + sleep 10 + [[ 1776938429 -gt 1776938463 ]] + [[ 8 -gt 30 ]] ++ get_latest_oplog_chunk_ts some-name ++ local cluster=some-name +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.2ccXUrME00 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.v2ddQFQega +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.2ccXUrME00 +++ cat /tmp/tmp.v2ddQFQega +++ rm /tmp/tmp.2ccXUrME00 /tmp/tmp.v2ddQFQega +++ return 0 ++ echo 1776938430 + latest_ts=1776938430 + retries=9 ++ format_date 1776938430 ++ local timestamp=1776938430 +++ TZ=UTC +++ /usr/sbin/date -d@1776938430 '+%Y-%m-%d %H:%M:%S' ++ echo 2026-04-23 10:00:30 ++ format_date 1776938463 ++ local timestamp=1776938463 +++ TZ=UTC +++ /usr/sbin/date -d@1776938463 '+%Y-%m-%d %H:%M:%S' ++ echo 2026-04-23 10:01:03 + echo 'Waiting for last oplog chunk (2026-04-23 10:00:30) to be greater than restore target (2026-04-23 10:01:03)' Waiting for last oplog chunk (2026-04-23 10:00:30) to be greater than restore target (2026-04-23 10:01:03) + sleep 10 + [[ 1776938430 -gt 1776938463 ]] + [[ 9 -gt 30 ]] ++ get_latest_oplog_chunk_ts some-name ++ local cluster=some-name +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ 
local LAST_OUT=/tmp/tmp.ihSnF43Nx1 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.U1QAbXlsxE +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.ihSnF43Nx1 +++ cat /tmp/tmp.U1QAbXlsxE +++ rm /tmp/tmp.ihSnF43Nx1 /tmp/tmp.U1QAbXlsxE +++ return 0 ++ echo 1776938549 + latest_ts=1776938549 + retries=10 ++ format_date 1776938549 ++ local timestamp=1776938549 +++ TZ=UTC +++ /usr/sbin/date -d@1776938549 '+%Y-%m-%d %H:%M:%S' ++ echo 2026-04-23 10:02:29 ++ format_date 1776938463 ++ local timestamp=1776938463 +++ TZ=UTC +++ /usr/sbin/date -d@1776938463 '+%Y-%m-%d %H:%M:%S' ++ echo 2026-04-23 10:01:03 + echo 'Waiting for last oplog chunk (2026-04-23 10:02:29) to be greater than restore target (2026-04-23 10:01:03)' Waiting for last oplog chunk (2026-04-23 10:02:29) to be greater than restore target (2026-04-23 10:01:03) + sleep 10 + [[ 1776938549 -gt 1776938463 ]] + '[' -z '' ']' + desc 'check restore by date' + set +o xtrace ----------------------------------------------------------------------------------- check restore by date ----------------------------------------------------------------------------------- + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2269/e2e-tests/pitr-sharded/conf/restore.yml + /usr/sbin/sed -e 's/name:/name: restore-backup-minio-0/' + /usr/sbin/sed -e 's/backupName:/backupName: backup-minio-0/' + /usr/sbin/sed -e /backupSource/,+8d + /usr/sbin/sed -e 's/pitrType:/type: date/' + '[' -z 1776938463 ']' + kubectl_bin apply -f - ++ format_date 1776938463 ++ local timestamp=1776938463 ++ mktemp +++ TZ=UTC +++ /usr/sbin/date -d@1776938463 '+%Y-%m-%d %H:%M:%S' + local LAST_OUT=/tmp/tmp.DHM6hHiSTC ++ mktemp ++ echo 2026-04-23 10:01:03 + /usr/sbin/sed -e 's/date:/date: 2026-04-23 10:01:03/' + local LAST_ERR=/tmp/tmp.XNqD5dIyNo + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.DHM6hHiSTC perconaservermongodbrestore.psmdb.percona.com/restore-backup-minio-0 created + cat /tmp/tmp.XNqD5dIyNo + rm /tmp/tmp.DHM6hHiSTC /tmp/tmp.XNqD5dIyNo + return 0 + wait_restore backup-minio-0 some-name requested 0 1200 + local backup_name=backup-minio-0 + local cluster_name=some-name + local target_state=requested + local wait_cluster_consistency=0 + local wait_time=1200 + local ok_if_ready=0 + set +o xtrace Waiting for the psmdb-restore/restore-backup-minio-0 object to be created.OK Waiting psmdb-restore/restore-backup-minio-0 to reach state "requested" ..OK after 1 minutes + [[ 0 -eq 1 ]] + echo + wait_restore backup-minio-0 some-name ready 0 1600 + local backup_name=backup-minio-0 + local cluster_name=some-name + local target_state=ready + local wait_cluster_consistency=0 + local wait_time=1600 + local ok_if_ready=0 + set +o xtrace Waiting for the psmdb-restore/restore-backup-minio-0 object to be created.OK Waiting psmdb-restore/restore-backup-minio-0 to reach state "ready" .OK after 0 minutes + [[ 0 -eq 1 ]] + echo + set -o xtrace + wait_for_running some-name-rs0 3 + local name=some-name-rs0 + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=some-name ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod some-name-rs0-0 + local pod=some-name-rs0-0 + set +o xtrace waiting for 
pod/some-name-rs0-0 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod some-name-rs0-1 + local pod=some-name-rs0-1 + set +o xtrace waiting for pod/some-name-rs0-1 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.RxF92dfEMD +++ mktemp ++ local LAST_ERR=/tmp/tmp.swf9B002ui ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.RxF92dfEMD ++ cat /tmp/tmp.swf9B002ui ++ rm /tmp/tmp.RxF92dfEMD /tmp/tmp.swf9B002ui ++ return 0 + [[ '' == true ]] + wait_pod some-name-rs0-2 + local pod=some-name-rs0-2 + set +o xtrace waiting for pod/some-name-rs0-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.RFGDoT8wSc +++ mktemp ++ local LAST_ERR=/tmp/tmp.4QPQWQvPqY ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.RFGDoT8wSc ++ cat /tmp/tmp.4QPQWQvPqY ++ rm /tmp/tmp.RFGDoT8wSc /tmp/tmp.4QPQWQvPqY ++ return 0 + [[ '' == true ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.GkGDeYmXUw +++ mktemp ++ local LAST_ERR=/tmp/tmp.uN45R0cETW ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.GkGDeYmXUw ++ cat /tmp/tmp.uN45R0cETW ++ rm /tmp/tmp.GkGDeYmXUw /tmp/tmp.uN45R0cETW ++ return 0 + [[ '' == true ]] + sleep 10 + [[ true == true ]] + set +x Waiting for cluster readyness........................... 
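
A restore by date restarts every component, so the test re-runs wait_for_running for rs0 (above) and next for the cfg replica set and mongos. The helper waits on pods 0..2 and consults the psmdb spec for arbiter, non-voting, and hidden members, since those change which pods must become Ready. A simplified sketch of that walk, using kubectl wait in place of the suite's spinner-based wait_pod:

    rs=rs0
    for i in 0 1 2; do
        if [[ $i -eq 2 ]]; then
            # skip the Ready wait on the last member if it is an arbiter
            arbiter=$(kubectl get psmdb some-name \
                -o "jsonpath={.spec.replsets[?(@.name==\"$rs\")].arbiter.enabled}")
            [[ $arbiter == true ]] && break
        fi
        kubectl wait --for=condition=Ready "pod/some-name-$rs-$i" --timeout=300s
    done
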
+ wait_for_running some-name-cfg 3 + local name=some-name-cfg + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=cfg + local cluster_name=some-name ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod some-name-cfg-0 + local pod=some-name-cfg-0 + set +o xtrace waiting for pod/some-name-cfg-0 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod some-name-cfg-1 + local pod=some-name-cfg-1 + set +o xtrace waiting for pod/some-name-cfg-1 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.M5BPHFm7Mc +++ mktemp ++ local LAST_ERR=/tmp/tmp.4WMypb6B8E ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.M5BPHFm7Mc ++ cat /tmp/tmp.4WMypb6B8E ++ rm /tmp/tmp.M5BPHFm7Mc /tmp/tmp.4WMypb6B8E ++ return 0 + [[ '' == true ]] + wait_pod some-name-cfg-2 + local pod=some-name-cfg-2 + set +o xtrace waiting for pod/some-name-cfg-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.uzu7B55Lz8 +++ mktemp ++ local LAST_ERR=/tmp/tmp.SAcTkjD000 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.uzu7B55Lz8 ++ cat /tmp/tmp.SAcTkjD000 ++ rm /tmp/tmp.uzu7B55Lz8 /tmp/tmp.SAcTkjD000 ++ return 0 + [[ '' == true ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.9wfMyA4CJK +++ mktemp ++ local LAST_ERR=/tmp/tmp.GfmReHUBNU ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.9wfMyA4CJK ++ cat /tmp/tmp.GfmReHUBNU ++ rm /tmp/tmp.9wfMyA4CJK /tmp/tmp.GfmReHUBNU ++ return 0 + [[ '' == true ]] + sleep 10 + [[ true == true ]] + set +x Waiting for cluster readyness + wait_for_running some-name-mongos 3 + local name=some-name-mongos + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=mongos + local cluster_name=some-name ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod some-name-mongos-0 + local pod=some-name-mongos-0 + set +o xtrace waiting for pod/some-name-mongos-0 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod some-name-mongos-1 + local pod=some-name-mongos-1 + set +o xtrace waiting for pod/some-name-mongos-1 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.7JbwZYU4kf +++ mktemp ++ local LAST_ERR=/tmp/tmp.ELAKWA3Nhx ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ 
'[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.7JbwZYU4kf ++ cat /tmp/tmp.ELAKWA3Nhx ++ rm /tmp/tmp.7JbwZYU4kf /tmp/tmp.ELAKWA3Nhx ++ return 0 + [[ '' == true ]] + wait_pod some-name-mongos-2 + local pod=some-name-mongos-2 + set +o xtrace waiting for pod/some-name-mongos-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.SM5Xlqu1vi +++ mktemp ++ local LAST_ERR=/tmp/tmp.y9lQeiwC11 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.SM5Xlqu1vi ++ cat /tmp/tmp.y9lQeiwC11 ++ rm /tmp/tmp.SM5Xlqu1vi /tmp/tmp.y9lQeiwC11 ++ return 0 + [[ '' == true ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.V48SmVZX0S +++ mktemp ++ local LAST_ERR=/tmp/tmp.SSDm1WNA7d ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.V48SmVZX0S ++ cat /tmp/tmp.SSDm1WNA7d ++ rm /tmp/tmp.V48SmVZX0S /tmp/tmp.SSDm1WNA7d ++ return 0 + [[ '' == true ]] + sleep 10 + [[ true == true ]] + set +x Waiting for cluster readyness + sleep 10 + compare_mongos_cmd find myApp:myPass@some-name-mongos.pitr-sharded-13591 -2nd + local command=find + local uri=myApp:myPass@some-name-mongos.pitr-sharded-13591 + local postfix=-2nd + local suffix= + local database=myApp + local collection=test + local port=27017 + local tls=false + [[ false == true ]] + mongos_command=run_mongos + log 'running db.test.command() in myApp' + set +o xtrace [2026-04-23T10:06:50+0000] running db.test.command() in myApp + run_mongos 'use myApp\n db.test.find()' myApp:myPass@some-name-mongos.pitr-sharded-13591 mongodb '' '' 27017 + local 'command=use myApp\n db.test.find()' + grep -E -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' + local uri=myApp:myPass@some-name-mongos.pitr-sharded-13591 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local port=27017 + local mongo_bin=mongo + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ echo .svc.cluster.local ++ awk -F: '{print $2}' + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.0gHmzR7Ing +++ mktemp ++ local LAST_ERR=/tmp/tmp.VOrZYc6B91 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.0gHmzR7Ing ++ cat /tmp/tmp.VOrZYc6B91 ++ rm /tmp/tmp.0gHmzR7Ing /tmp/tmp.VOrZYc6B91 ++ return 0 + local client_container=psmdb-client-bb8b97679-q9vph + kubectl_bin exec psmdb-client-bb8b97679-q9vph -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo 
mongodb://myApp:myPass@some-name-mongos.pitr-sharded-13591.svc.cluster.local:27017/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.CPluneuxWz ++ mktemp + local LAST_ERR=/tmp/tmp.gDeO7lnueA + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-bb8b97679-q9vph -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.pitr-sharded-13591.svc.cluster.local:27017/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.CPluneuxWz + cat /tmp/tmp.gDeO7lnueA + rm /tmp/tmp.CPluneuxWz /tmp/tmp.gDeO7lnueA + return 0 + [[ 0 -eq 0 ]] + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2269/e2e-tests/pitr-sharded/compare/find-2nd.json /tmp/tmp.CfvgmtWHfZ/find-2nd + run_backup backup-minio 1 + local name=backup-minio + local idx=1 + desc 'run backup backup-minio-1' + set +o xtrace ----------------------------------------------------------------------------------- run backup backup-minio-1 ----------------------------------------------------------------------------------- + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2269/e2e-tests/pitr-sharded/conf/backup-minio.yml + /usr/sbin/sed -e 's/name:/name: backup-minio-1/' + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.xDwbvPaamB ++ mktemp + local LAST_ERR=/tmp/tmp.uTzBejUPuN + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.xDwbvPaamB perconaservermongodbbackup.psmdb.percona.com/backup-minio-1 created + cat /tmp/tmp.uTzBejUPuN + rm /tmp/tmp.xDwbvPaamB /tmp/tmp.uTzBejUPuN + return 0 + wait_backup backup-minio-1 + local backup_name=backup-minio-1 + local target_state=ready + set +o xtrace waiting for backup-minio-1 to reach ready state..................OK + sleep 5 + check_recovery backup-minio-1 latest '' -3rd some-name + local backup_name=backup-minio-1 + local restore_type=latest + local restore_date= + local cmp_postfix=-3rd + local cluster_name=some-name + local backupSource= ++ get_latest_oplog_chunk_ts some-name ++ local cluster=some-name +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.6ekWqNUXZB ++++ mktemp +++ local LAST_ERR=/tmp/tmp.A0GZsdGYe0 +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.6ekWqNUXZB +++ cat /tmp/tmp.A0GZsdGYe0 +++ rm /tmp/tmp.6ekWqNUXZB /tmp/tmp.A0GZsdGYe0 +++ return 0 ++ echo 1776938659 + local latest_ts=1776938659 + desc 'write more data before restore by latest' + set +o xtrace ----------------------------------------------------------------------------------- write more data before restore by latest ----------------------------------------------------------------------------------- + run_mongos 'use myApp\n db.test.insert({ x: 100501 })' myApp:myPass@some-name-mongos.pitr-sharded-13591 + local 'command=use myApp\n db.test.insert({ x: 100501 })' + local uri=myApp:myPass@some-name-mongos.pitr-sharded-13591 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local port=27017 + local mongo_bin=mongo ++ echo .svc.cluster.local ++ awk -F: '{print $2}' + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 ++ 
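# --- editor's note ---------------------------------------------------------
# run_backup above is just a name substitution over a template plus an
# apply; wait_backup then polls the object until the operator marks it ready
# (the dots in the log are that poll). Sketch under the assumption that the
# state is exposed at .status.state on the psmdb-backup object:
run_backup() {
    local name="$1" idx="$2"
    sed -e "s/name:/name: ${name}-${idx}/" \
        e2e-tests/pitr-sharded/conf/backup-minio.yml | kubectl apply -f -
}
wait_backup() {
    local backup_name="$1" state=""
    until [[ "$state" == "ready" ]]; do
        sleep 1
        state=$(kubectl get psmdb-backup "$backup_name" \
            -o 'jsonpath={.status.state}' 2>/dev/null)
    done
}
# --------------------------------------------------------------------------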
kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.lxI7Ex3MND +++ mktemp ++ local LAST_ERR=/tmp/tmp.teUNrN6I0j ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.lxI7Ex3MND ++ cat /tmp/tmp.teUNrN6I0j ++ rm /tmp/tmp.lxI7Ex3MND /tmp/tmp.teUNrN6I0j ++ return 0 + local client_container=psmdb-client-bb8b97679-q9vph + kubectl_bin exec psmdb-client-bb8b97679-q9vph -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100501 })\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.pitr-sharded-13591.svc.cluster.local:27017/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.fKXd470IRA ++ mktemp + local LAST_ERR=/tmp/tmp.RBN65fW3tP + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-bb8b97679-q9vph -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100501 })\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.pitr-sharded-13591.svc.cluster.local:27017/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.fKXd470IRA Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-mongos.pitr-sharded-13591.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb Implicit session: session { "id" : UUID("2bb4af5f-3fa7-44b8-b518-f02e0540ffff") } Percona Server for MongoDB server version: v8.0.20-8 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.RBN65fW3tP + rm /tmp/tmp.fKXd470IRA /tmp/tmp.RBN65fW3tP + return 0 + [[ -n '' ]] + desc 'Restoring to latest' + set +o xtrace ----------------------------------------------------------------------------------- Restoring to latest ----------------------------------------------------------------------------------- ++ get_latest_oplog_chunk_ts some-name ++ local cluster=some-name +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.47j8DtzWDN ++++ mktemp +++ local LAST_ERR=/tmp/tmp.H6rlCs8eo2 +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.47j8DtzWDN +++ cat /tmp/tmp.H6rlCs8eo2 +++ rm /tmp/tmp.47j8DtzWDN /tmp/tmp.H6rlCs8eo2 +++ return 0 ++ echo 1776938659 + local current_ts=1776938659 + retries=0 + [[ 1776938659 -gt 1776938659 ]] + [[ 0 -gt 30 ]] ++ get_latest_oplog_chunk_ts some-name ++ local cluster=some-name +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.dhQJu5iG8d ++++ mktemp +++ local LAST_ERR=/tmp/tmp.QI3Tbnq4nQ +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.dhQJu5iG8d +++ cat /tmp/tmp.QI3Tbnq4nQ +++ rm /tmp/tmp.dhQJu5iG8d /tmp/tmp.QI3Tbnq4nQ +++ return 0 ++ echo 1776938659 + latest_ts=1776938659 + retries=1 ++ format_date 1776938659 ++ local 
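# --- editor's note ---------------------------------------------------------
# Both helpers exercised above come straight out of the trace: the PITR
# watermark is the end of the newest oplog chunk reported by PBM in the
# backup-agent sidecar, and format_date renders it in UTC:
get_latest_oplog_chunk_ts() {
    local cluster="$1"
    kubectl exec "${cluster}-rs0-0" -c backup-agent -- pbm status -o json |
        jq '.backups.pitrChunks.pitrChunks | last | .range.end'
}
format_date() {
    TZ=UTC date -d "@$1" '+%Y-%m-%d %H:%M:%S'
}
# --------------------------------------------------------------------------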
timestamp=1776938659 +++ TZ=UTC +++ /usr/sbin/date -d@1776938659 '+%Y-%m-%d %H:%M:%S' ++ echo 2026-04-23 10:04:19 ++ format_date 1776938659 ++ local timestamp=1776938659 +++ TZ=UTC +++ /usr/sbin/date -d@1776938659 '+%Y-%m-%d %H:%M:%S' ++ echo 2026-04-23 10:04:19 + echo 'Waiting for last oplog chunk (2026-04-23 10:04:19) to be 120 seconds older than starting chunk (2026-04-23 10:04:19)' Waiting for last oplog chunk (2026-04-23 10:04:19) to be 120 seconds older than starting chunk (2026-04-23 10:04:19) + sleep 10 + [[ 1776938659 -gt 1776938659 ]] + [[ 1 -gt 30 ]] ++ get_latest_oplog_chunk_ts some-name ++ local cluster=some-name +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.vIEy6Y5SEZ ++++ mktemp +++ local LAST_ERR=/tmp/tmp.3XE1dk3KW9 +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.vIEy6Y5SEZ +++ cat /tmp/tmp.3XE1dk3KW9 +++ rm /tmp/tmp.vIEy6Y5SEZ /tmp/tmp.3XE1dk3KW9 +++ return 0 ++ echo 1776938659 + latest_ts=1776938659 + retries=2 ++ format_date 1776938659 ++ local timestamp=1776938659 +++ TZ=UTC +++ /usr/sbin/date -d@1776938659 '+%Y-%m-%d %H:%M:%S' ++ echo 2026-04-23 10:04:19 ++ format_date 1776938659 ++ local timestamp=1776938659 +++ TZ=UTC +++ /usr/sbin/date -d@1776938659 '+%Y-%m-%d %H:%M:%S' ++ echo 2026-04-23 10:04:19 + echo 'Waiting for last oplog chunk (2026-04-23 10:04:19) to be 120 seconds older than starting chunk (2026-04-23 10:04:19)' Waiting for last oplog chunk (2026-04-23 10:04:19) to be 120 seconds older than starting chunk (2026-04-23 10:04:19) + sleep 10 + [[ 1776938659 -gt 1776938659 ]] + [[ 2 -gt 30 ]] ++ get_latest_oplog_chunk_ts some-name ++ local cluster=some-name +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.fq4TmBOyoR ++++ mktemp +++ local LAST_ERR=/tmp/tmp.oV0FPaw1qg +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.fq4TmBOyoR +++ cat /tmp/tmp.oV0FPaw1qg +++ rm /tmp/tmp.fq4TmBOyoR /tmp/tmp.oV0FPaw1qg +++ return 0 ++ echo 1776938659 + latest_ts=1776938659 + retries=3 ++ format_date 1776938659 ++ local timestamp=1776938659 +++ TZ=UTC +++ /usr/sbin/date -d@1776938659 '+%Y-%m-%d %H:%M:%S' ++ echo 2026-04-23 10:04:19 ++ format_date 1776938659 ++ local timestamp=1776938659 +++ TZ=UTC +++ /usr/sbin/date -d@1776938659 '+%Y-%m-%d %H:%M:%S' ++ echo 2026-04-23 10:04:19 + echo 'Waiting for last oplog chunk (2026-04-23 10:04:19) to be 120 seconds older than starting chunk (2026-04-23 10:04:19)' Waiting for last oplog chunk (2026-04-23 10:04:19) to be 120 seconds older than starting chunk (2026-04-23 10:04:19) + sleep 10 + [[ 1776938659 -gt 1776938659 ]] + [[ 3 -gt 30 ]] ++ get_latest_oplog_chunk_ts some-name ++ local cluster=some-name +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.fnMvK4spTk ++++ mktemp +++ local LAST_ERR=/tmp/tmp.JiOAlVlkQG +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in 
$(seq 0 2) +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.fnMvK4spTk +++ cat /tmp/tmp.JiOAlVlkQG +++ rm /tmp/tmp.fnMvK4spTk /tmp/tmp.JiOAlVlkQG +++ return 0 ++ echo 1776938659 + latest_ts=1776938659 + retries=4 ++ format_date 1776938659 ++ local timestamp=1776938659 +++ TZ=UTC +++ /usr/sbin/date -d@1776938659 '+%Y-%m-%d %H:%M:%S' ++ echo 2026-04-23 10:04:19 ++ format_date 1776938659 ++ local timestamp=1776938659 +++ TZ=UTC +++ /usr/sbin/date -d@1776938659 '+%Y-%m-%d %H:%M:%S' ++ echo 2026-04-23 10:04:19 + echo 'Waiting for last oplog chunk (2026-04-23 10:04:19) to be 120 seconds older than starting chunk (2026-04-23 10:04:19)' Waiting for last oplog chunk (2026-04-23 10:04:19) to be 120 seconds older than starting chunk (2026-04-23 10:04:19) + sleep 10 + [[ 1776938659 -gt 1776938659 ]] + [[ 4 -gt 30 ]] ++ get_latest_oplog_chunk_ts some-name ++ local cluster=some-name +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json ++++ mktemp +++ local LAST_OUT=/tmp/tmp.WjA1Fub7RP ++++ mktemp +++ local LAST_ERR=/tmp/tmp.r6QKGvI32O +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.WjA1Fub7RP +++ cat /tmp/tmp.r6QKGvI32O +++ rm /tmp/tmp.WjA1Fub7RP /tmp/tmp.r6QKGvI32O +++ return 0 ++ echo 1776938659 + latest_ts=1776938659 + retries=5 ++ format_date 1776938659 ++ local timestamp=1776938659 +++ TZ=UTC +++ /usr/sbin/date -d@1776938659 '+%Y-%m-%d %H:%M:%S' ++ echo 2026-04-23 10:04:19 ++ format_date 1776938659 ++ local timestamp=1776938659 +++ TZ=UTC +++ /usr/sbin/date -d@1776938659 '+%Y-%m-%d %H:%M:%S' ++ echo 2026-04-23 10:04:19 + echo 'Waiting for last oplog chunk (2026-04-23 10:04:19) to be 120 seconds older than starting chunk (2026-04-23 10:04:19)' Waiting for last oplog chunk (2026-04-23 10:04:19) to be 120 seconds older than starting chunk (2026-04-23 10:04:19) + sleep 10 + [[ 1776938659 -gt 1776938659 ]] + [[ 5 -gt 30 ]] ++ get_latest_oplog_chunk_ts some-name ++ local cluster=some-name +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.3cn93qHDmA ++++ mktemp +++ local LAST_ERR=/tmp/tmp.xpeDPBXjbc +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.3cn93qHDmA +++ cat /tmp/tmp.xpeDPBXjbc +++ rm /tmp/tmp.3cn93qHDmA /tmp/tmp.xpeDPBXjbc +++ return 0 ++ echo 1776938659 + latest_ts=1776938659 + retries=6 ++ format_date 1776938659 ++ local timestamp=1776938659 +++ TZ=UTC +++ /usr/sbin/date -d@1776938659 '+%Y-%m-%d %H:%M:%S' ++ echo 2026-04-23 10:04:19 ++ format_date 1776938659 ++ local timestamp=1776938659 +++ TZ=UTC +++ /usr/sbin/date -d@1776938659 '+%Y-%m-%d %H:%M:%S' ++ echo 2026-04-23 10:04:19 + echo 'Waiting for last oplog chunk (2026-04-23 10:04:19) to be 120 seconds older than starting chunk (2026-04-23 10:04:19)' Waiting for last oplog chunk (2026-04-23 10:04:19) to be 120 seconds older than starting chunk (2026-04-23 10:04:19) + sleep 10 + [[ 1776938659 -gt 1776938659 ]] + [[ 6 -gt 30 ]] ++ 
get_latest_oplog_chunk_ts some-name ++ local cluster=some-name +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.yrW6RydU2e ++++ mktemp +++ local LAST_ERR=/tmp/tmp.9600q8K0aw +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.yrW6RydU2e +++ cat /tmp/tmp.9600q8K0aw +++ rm /tmp/tmp.yrW6RydU2e /tmp/tmp.9600q8K0aw +++ return 0 ++ echo 1776938659 + latest_ts=1776938659 + retries=7 ++ format_date 1776938659 ++ local timestamp=1776938659 +++ TZ=UTC +++ /usr/sbin/date -d@1776938659 '+%Y-%m-%d %H:%M:%S' ++ echo 2026-04-23 10:04:19 ++ format_date 1776938659 ++ local timestamp=1776938659 +++ TZ=UTC +++ /usr/sbin/date -d@1776938659 '+%Y-%m-%d %H:%M:%S' ++ echo 2026-04-23 10:04:19 + echo 'Waiting for last oplog chunk (2026-04-23 10:04:19) to be 120 seconds older than starting chunk (2026-04-23 10:04:19)' Waiting for last oplog chunk (2026-04-23 10:04:19) to be 120 seconds older than starting chunk (2026-04-23 10:04:19) + sleep 10 + [[ 1776938659 -gt 1776938659 ]] + [[ 7 -gt 30 ]] ++ get_latest_oplog_chunk_ts some-name ++ local cluster=some-name +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json ++++ mktemp +++ local LAST_OUT=/tmp/tmp.xyqYI6anuE ++++ mktemp +++ local LAST_ERR=/tmp/tmp.7m8xqZAf5b +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.xyqYI6anuE +++ cat /tmp/tmp.7m8xqZAf5b +++ rm /tmp/tmp.xyqYI6anuE /tmp/tmp.7m8xqZAf5b +++ return 0 ++ echo 1776938659 + latest_ts=1776938659 + retries=8 ++ format_date 1776938659 ++ local timestamp=1776938659 +++ TZ=UTC +++ /usr/sbin/date -d@1776938659 '+%Y-%m-%d %H:%M:%S' ++ echo 2026-04-23 10:04:19 ++ format_date 1776938659 ++ local timestamp=1776938659 +++ TZ=UTC +++ /usr/sbin/date -d@1776938659 '+%Y-%m-%d %H:%M:%S' ++ echo 2026-04-23 10:04:19 + echo 'Waiting for last oplog chunk (2026-04-23 10:04:19) to be 120 seconds older than starting chunk (2026-04-23 10:04:19)' Waiting for last oplog chunk (2026-04-23 10:04:19) to be 120 seconds older than starting chunk (2026-04-23 10:04:19) + sleep 10 + [[ 1776938659 -gt 1776938659 ]] + [[ 8 -gt 30 ]] ++ get_latest_oplog_chunk_ts some-name ++ local cluster=some-name +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.wZ9n5DrijV ++++ mktemp +++ local LAST_ERR=/tmp/tmp.Xy50KB1HDR +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.wZ9n5DrijV +++ cat /tmp/tmp.Xy50KB1HDR +++ rm /tmp/tmp.wZ9n5DrijV /tmp/tmp.Xy50KB1HDR +++ return 0 ++ echo 1776938659 + latest_ts=1776938659 + retries=9 ++ format_date 1776938659 ++ local timestamp=1776938659 +++ TZ=UTC +++ /usr/sbin/date -d@1776938659 '+%Y-%m-%d %H:%M:%S' ++ echo 2026-04-23 10:04:19 ++ format_date 1776938659 ++ local timestamp=1776938659 +++ TZ=UTC +++ /usr/sbin/date 
-d@1776938659 '+%Y-%m-%d %H:%M:%S' ++ echo 2026-04-23 10:04:19 + echo 'Waiting for last oplog chunk (2026-04-23 10:04:19) to be 120 seconds older than starting chunk (2026-04-23 10:04:19)' Waiting for last oplog chunk (2026-04-23 10:04:19) to be 120 seconds older than starting chunk (2026-04-23 10:04:19) + sleep 10 + [[ 1776938659 -gt 1776938659 ]] + [[ 9 -gt 30 ]] ++ get_latest_oplog_chunk_ts some-name ++ local cluster=some-name +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.nFcI73gWul ++++ mktemp +++ local LAST_ERR=/tmp/tmp.7WLZDQk2Tm +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.nFcI73gWul +++ cat /tmp/tmp.7WLZDQk2Tm +++ rm /tmp/tmp.nFcI73gWul /tmp/tmp.7WLZDQk2Tm +++ return 0 ++ echo 1776938659 + latest_ts=1776938659 + retries=10 ++ format_date 1776938659 ++ local timestamp=1776938659 +++ TZ=UTC +++ /usr/sbin/date -d@1776938659 '+%Y-%m-%d %H:%M:%S' ++ echo 2026-04-23 10:04:19 ++ format_date 1776938659 ++ local timestamp=1776938659 +++ TZ=UTC +++ /usr/sbin/date -d@1776938659 '+%Y-%m-%d %H:%M:%S' ++ echo 2026-04-23 10:04:19 + echo 'Waiting for last oplog chunk (2026-04-23 10:04:19) to be 120 seconds older than starting chunk (2026-04-23 10:04:19)' Waiting for last oplog chunk (2026-04-23 10:04:19) to be 120 seconds older than starting chunk (2026-04-23 10:04:19) + sleep 10 + [[ 1776938659 -gt 1776938659 ]] + [[ 10 -gt 30 ]] ++ get_latest_oplog_chunk_ts some-name ++ local cluster=some-name +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.kdEhfK4dsw ++++ mktemp +++ local LAST_ERR=/tmp/tmp.xEYARWLD0b +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.kdEhfK4dsw +++ cat /tmp/tmp.xEYARWLD0b +++ rm /tmp/tmp.kdEhfK4dsw /tmp/tmp.xEYARWLD0b +++ return 0 ++ echo 1776938659 + latest_ts=1776938659 + retries=11 ++ format_date 1776938659 ++ local timestamp=1776938659 +++ TZ=UTC +++ /usr/sbin/date -d@1776938659 '+%Y-%m-%d %H:%M:%S' ++ echo 2026-04-23 10:04:19 ++ format_date 1776938659 ++ local timestamp=1776938659 +++ TZ=UTC +++ /usr/sbin/date -d@1776938659 '+%Y-%m-%d %H:%M:%S' ++ echo 2026-04-23 10:04:19 + echo 'Waiting for last oplog chunk (2026-04-23 10:04:19) to be 120 seconds older than starting chunk (2026-04-23 10:04:19)' Waiting for last oplog chunk (2026-04-23 10:04:19) to be 120 seconds older than starting chunk (2026-04-23 10:04:19) + sleep 10 + [[ 1776938659 -gt 1776938659 ]] + [[ 11 -gt 30 ]] ++ get_latest_oplog_chunk_ts some-name ++ local cluster=some-name +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.l0cMk2aRjZ ++++ mktemp +++ local LAST_ERR=/tmp/tmp.ecYxu7dAf8 +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.l0cMk2aRjZ +++ 
cat /tmp/tmp.ecYxu7dAf8 +++ rm /tmp/tmp.l0cMk2aRjZ /tmp/tmp.ecYxu7dAf8 +++ return 0 ++ echo 1776938979 + latest_ts=1776938979 + retries=12 ++ format_date 1776938979 ++ local timestamp=1776938979 +++ TZ=UTC +++ /usr/sbin/date -d@1776938979 '+%Y-%m-%d %H:%M:%S' ++ echo 2026-04-23 10:09:39 ++ format_date 1776938659 ++ local timestamp=1776938659 +++ TZ=UTC +++ /usr/sbin/date -d@1776938659 '+%Y-%m-%d %H:%M:%S' ++ echo 2026-04-23 10:04:19 + echo 'Waiting for last oplog chunk (2026-04-23 10:09:39) to be 120 seconds older than starting chunk (2026-04-23 10:04:19)' Waiting for last oplog chunk (2026-04-23 10:09:39) to be 120 seconds older than starting chunk (2026-04-23 10:04:19) + sleep 10 + [[ 1776938979 -gt 1776938659 ]] + '[' -z '' ']' + desc 'check restore by latest' + set +o xtrace ----------------------------------------------------------------------------------- check restore by latest ----------------------------------------------------------------------------------- + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2269/e2e-tests/pitr-sharded/conf/restore.yml + /usr/sbin/sed -e 's/name:/name: restore-backup-minio-1/' + /usr/sbin/sed -e 's/backupName:/backupName: backup-minio-1/' + /usr/sbin/sed -e /backupSource/,+8d + /usr/sbin/sed -e 's/pitrType:/type: latest/' + kubectl_bin apply -f - + '[' -z '' ']' + /usr/sbin/sed -e /date:/d ++ mktemp + local LAST_OUT=/tmp/tmp.vJIQ0LHlno ++ mktemp + local LAST_ERR=/tmp/tmp.hBAgDkl7ww + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.vJIQ0LHlno perconaservermongodbrestore.psmdb.percona.com/restore-backup-minio-1 created + cat /tmp/tmp.hBAgDkl7ww + rm /tmp/tmp.vJIQ0LHlno /tmp/tmp.hBAgDkl7ww + return 0 + wait_restore backup-minio-1 some-name requested 0 1200 + local backup_name=backup-minio-1 + local cluster_name=some-name + local target_state=requested + local wait_cluster_consistency=0 + local wait_time=1200 + local ok_if_ready=0 + set +o xtrace Waiting for the psmdb-restore/restore-backup-minio-1 object to be created.OK Waiting psmdb-restore/restore-backup-minio-1 to reach state "requested" ..OK after 1 minutes + [[ 0 -eq 1 ]] + echo + wait_restore backup-minio-1 some-name ready 0 1600 + local backup_name=backup-minio-1 + local cluster_name=some-name + local target_state=ready + local wait_cluster_consistency=0 + local wait_time=1600 + local ok_if_ready=0 + set +o xtrace Waiting for the psmdb-restore/restore-backup-minio-1 object to be created.OK Waiting psmdb-restore/restore-backup-minio-1 to reach state "ready" .OK after 0 minutes + [[ 0 -eq 1 ]] + echo + set -o xtrace + wait_for_running some-name-rs0 3 + local name=some-name-rs0 + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=some-name ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod some-name-rs0-0 + local pod=some-name-rs0-0 + set +o xtrace waiting for pod/some-name-rs0-0 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod some-name-rs0-1 + local pod=some-name-rs0-1 + set +o xtrace waiting for pod/some-name-rs0-1 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.npAzbdk6EH +++ mktemp ++ local LAST_ERR=/tmp/tmp.WCU2wVDaXE ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 
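# --- editor's note ---------------------------------------------------------
# The dozen near-identical blocks above are one polling loop: re-read the
# newest PITR chunk every 10 s (at most 30 tries) until it is newer than the
# chunk recorded before the extra write. The echoed message speaks of a
# 120-second margin, but the comparison visible in the trace is a plain -gt
# against the starting timestamp. Roughly:
current_ts=$(get_latest_oplog_chunk_ts some-name)   # chunk seen before the write
latest_ts=$current_ts
retries=0
while [[ "$latest_ts" -le "$current_ts" ]]; do
    (( retries++ > 30 )) && { echo "timeout waiting for a new oplog chunk"; exit 1; }
    sleep 10
    latest_ts=$(get_latest_oplog_chunk_ts some-name)
done
# The restore manifest applied above is produced by sed alone: placeholders
# are filled, the backupSource block and any explicit date line are dropped,
# and pitrType becomes "type: latest". Equivalent one-liner, per the trace:
sed -e 's/name:/name: restore-backup-minio-1/' \
    -e 's/backupName:/backupName: backup-minio-1/' \
    -e '/backupSource/,+8d' \
    -e '/date:/d' \
    -e 's/pitrType:/type: latest/' \
    e2e-tests/pitr-sharded/conf/restore.yml | kubectl apply -f -
# --------------------------------------------------------------------------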
2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.npAzbdk6EH ++ cat /tmp/tmp.WCU2wVDaXE ++ rm /tmp/tmp.npAzbdk6EH /tmp/tmp.WCU2wVDaXE ++ return 0 + [[ '' == true ]] + wait_pod some-name-rs0-2 + local pod=some-name-rs0-2 + set +o xtrace waiting for pod/some-name-rs0-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.dQbfp5of9U +++ mktemp ++ local LAST_ERR=/tmp/tmp.K8IF6zs5d4 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.dQbfp5of9U ++ cat /tmp/tmp.K8IF6zs5d4 ++ rm /tmp/tmp.dQbfp5of9U /tmp/tmp.K8IF6zs5d4 ++ return 0 + [[ '' == true ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.xrAEkPabiM +++ mktemp ++ local LAST_ERR=/tmp/tmp.VMnB1UeP10 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.xrAEkPabiM ++ cat /tmp/tmp.VMnB1UeP10 ++ rm /tmp/tmp.xrAEkPabiM /tmp/tmp.VMnB1UeP10 ++ return 0 + [[ '' == true ]] + sleep 10 + [[ true == true ]] + set +x Waiting for cluster readyness....................... + wait_for_running some-name-cfg 3 + local name=some-name-cfg + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=cfg + local cluster_name=some-name ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod some-name-cfg-0 + local pod=some-name-cfg-0 + set +o xtrace waiting for pod/some-name-cfg-0 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod some-name-cfg-1 + local pod=some-name-cfg-1 + set +o xtrace waiting for pod/some-name-cfg-1 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.iqUfmBbfkB +++ mktemp ++ local LAST_ERR=/tmp/tmp.1zsMwBAgX9 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.iqUfmBbfkB ++ cat /tmp/tmp.1zsMwBAgX9 ++ rm /tmp/tmp.iqUfmBbfkB /tmp/tmp.1zsMwBAgX9 ++ return 0 + [[ '' == true ]] + wait_pod some-name-cfg-2 + local pod=some-name-cfg-2 + set +o xtrace waiting for pod/some-name-cfg-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.2sMlG0PDXi +++ mktemp ++ local LAST_ERR=/tmp/tmp.aPCYCN3Tjj ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.2sMlG0PDXi ++ cat /tmp/tmp.aPCYCN3Tjj ++ rm /tmp/tmp.2sMlG0PDXi /tmp/tmp.aPCYCN3Tjj ++ return 0 + [[ '' == true ]] ++ kubectl_bin 
get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.HVlFyBDQs9 +++ mktemp ++ local LAST_ERR=/tmp/tmp.b24VCukMeB ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.HVlFyBDQs9 ++ cat /tmp/tmp.b24VCukMeB ++ rm /tmp/tmp.HVlFyBDQs9 /tmp/tmp.b24VCukMeB ++ return 0 + [[ '' == true ]] + sleep 10 + [[ true == true ]] + set +x Waiting for cluster readyness + wait_for_running some-name-mongos 3 + local name=some-name-mongos + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=mongos + local cluster_name=some-name ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod some-name-mongos-0 + local pod=some-name-mongos-0 + set +o xtrace waiting for pod/some-name-mongos-0 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod some-name-mongos-1 + local pod=some-name-mongos-1 + set +o xtrace waiting for pod/some-name-mongos-1 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.dYt3vVs5QM +++ mktemp ++ local LAST_ERR=/tmp/tmp.lqT7XDWt54 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.dYt3vVs5QM ++ cat /tmp/tmp.lqT7XDWt54 ++ rm /tmp/tmp.dYt3vVs5QM /tmp/tmp.lqT7XDWt54 ++ return 0 + [[ '' == true ]] + wait_pod some-name-mongos-2 + local pod=some-name-mongos-2 + set +o xtrace waiting for pod/some-name-mongos-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.UPKwYR6Xqu +++ mktemp ++ local LAST_ERR=/tmp/tmp.7VnapIgTp7 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.UPKwYR6Xqu ++ cat /tmp/tmp.7VnapIgTp7 ++ rm /tmp/tmp.UPKwYR6Xqu /tmp/tmp.7VnapIgTp7 ++ return 0 + [[ '' == true ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.gQpFKwn5zf +++ mktemp ++ local LAST_ERR=/tmp/tmp.GQFTKHyZHW ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.gQpFKwn5zf ++ cat /tmp/tmp.GQFTKHyZHW ++ rm /tmp/tmp.gQpFKwn5zf /tmp/tmp.GQFTKHyZHW ++ return 0 + [[ '' == true ]] + sleep 10 + [[ true == true ]] + set +x Waiting for cluster readyness + sleep 10 + compare_mongos_cmd find myApp:myPass@some-name-mongos.pitr-sharded-13591 -3rd + local command=find + local uri=myApp:myPass@some-name-mongos.pitr-sharded-13591 + local postfix=-3rd + local suffix= + local database=myApp + local collection=test + local port=27017 + local tls=false + [[ false == true ]] + mongos_command=run_mongos + log 
'running db.test.command() in myApp' + set +o xtrace [2026-04-23T10:13:25+0000] running db.test.command() in myApp + run_mongos 'use myApp\n db.test.find()' myApp:myPass@some-name-mongos.pitr-sharded-13591 mongodb '' '' 27017 + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-mongos.pitr-sharded-13591 + grep -E -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local port=27017 + local mongo_bin=mongo + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ echo .svc.cluster.local ++ awk -F: '{print $2}' + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.CfGdSc8TWM +++ mktemp ++ local LAST_ERR=/tmp/tmp.wi60MTBAEd ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.CfGdSc8TWM ++ cat /tmp/tmp.wi60MTBAEd ++ rm /tmp/tmp.CfGdSc8TWM /tmp/tmp.wi60MTBAEd ++ return 0 + local client_container=psmdb-client-bb8b97679-q9vph + kubectl_bin exec psmdb-client-bb8b97679-q9vph -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.pitr-sharded-13591.svc.cluster.local:27017/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.sbHjvfCRC3 ++ mktemp + local LAST_ERR=/tmp/tmp.Gd7yjKDTT9 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-bb8b97679-q9vph -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.pitr-sharded-13591.svc.cluster.local:27017/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.sbHjvfCRC3 + cat /tmp/tmp.Gd7yjKDTT9 + rm /tmp/tmp.sbHjvfCRC3 /tmp/tmp.Gd7yjKDTT9 + return 0 + [[ 0 -eq 0 ]] + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2269/e2e-tests/pitr-sharded/compare/find-3rd.json /tmp/tmp.CfvgmtWHfZ/find-3rd + compare_latest_restorable_time some-name-rs0 backup-minio-1 + local cluster=some-name-rs0 + local backup_name=backup-minio-1 + local latest_restorable_time + local backup_time ++ get_latest_restorable_time some-name-rs0 ++ local cluster=some-name-rs0 ++ local first_timestamp ++ local second_timestamp ++ local retry=0 ++ [[ '' != '' ]] +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.XwPndEM3Cp ++++ mktemp +++ local LAST_ERR=/tmp/tmp.7HYFIkJWzZ +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.XwPndEM3Cp +++ cat /tmp/tmp.7HYFIkJWzZ +++ rm /tmp/tmp.XwPndEM3Cp /tmp/tmp.7HYFIkJWzZ +++ return 0 ++ first_timestamp=1776939068 ++ sleep 5 ++ [[ 1776939068 != '' ]] ++ [[ 1776939068 != null ]] +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local 
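# --- editor's note ---------------------------------------------------------
# compare_latest_restorable_time (entered above) reads the PITR watermark
# twice, 5 s apart, and only trusts it once two consecutive reads agree and
# are non-null; the value is then rendered as RFC 3339 and checked against
# what the operator stored on the backup object. Sketch of that handshake
# (retry bound and error handling are approximations):
get_latest_restorable_time() {
    local cluster="$1" first second retry=0
    while :; do
        first=$(kubectl exec "${cluster}-0" -c backup-agent -- pbm status -o json |
            jq '.backups.pitrChunks.pitrChunks | last | .range.end')
        sleep 5
        second=$(kubectl exec "${cluster}-0" -c backup-agent -- pbm status -o json |
            jq '.backups.pitrChunks.pitrChunks | last | .range.end')
        [[ -n "$first" && "$first" != "null" && "$first" == "$second" ]] && break
        (( ++retry > 30 )) && return 1
    done
    date -u -d "@$first" +%Y-%m-%dT%H:%M:%SZ
}
# cross-check against the CR, as the trace does:
#   kubectl get psmdb-backup backup-minio-1 -o 'jsonpath={.status.latestRestorableTime}'
# --------------------------------------------------------------------------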
LAST_OUT=/tmp/tmp.k6STqyNObR ++++ mktemp +++ local LAST_ERR=/tmp/tmp.0ArRRryxz8 +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.k6STqyNObR +++ cat /tmp/tmp.0ArRRryxz8 +++ rm /tmp/tmp.k6STqyNObR /tmp/tmp.0ArRRryxz8 +++ return 0 ++ second_timestamp=1776939068 ++ let retry+=1 ++ [[ 1 -gt 30 ]] ++ [[ 1776939068 != '' ]] ++ [[ 1776939068 != null ]] ++ [[ 1776939068 == 1776939068 ]] ++ /usr/sbin/date -u -d @1776939068 +%Y-%m-%dT%H:%M:%SZ + latest_restorable_time=2026-04-23T10:11:08Z ++ get_latest_restorable_time_from_backup_object backup-minio-1 ++ local backup_name=backup-minio-1 ++ local latestRestorableTime ++ local retry=0 ++ [[ '' != '' ]] ++ sleep 5 +++ kubectl_bin get psmdb-backup backup-minio-1 -o 'jsonpath={.status.latestRestorableTime}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.kqyo6g9mev ++++ mktemp +++ local LAST_ERR=/tmp/tmp.KA6cEssPaW +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get psmdb-backup backup-minio-1 -o 'jsonpath={.status.latestRestorableTime}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.kqyo6g9mev +++ cat /tmp/tmp.KA6cEssPaW +++ rm /tmp/tmp.kqyo6g9mev /tmp/tmp.KA6cEssPaW +++ return 0 ++ latestRestorableTime=2026-04-23T10:11:08Z ++ let retry+=1 ++ [[ 1 -gt 30 ]] ++ [[ 2026-04-23T10:11:08Z != '' ]] ++ [[ 2026-04-23T10:11:08Z != null ]] ++ echo 2026-04-23T10:11:08Z + backup_time=2026-04-23T10:11:08Z + [[ 2026-04-23T10:11:08Z != 2026\-04\-23T10\:11\:08Z ]] + desc 'delete custom RuntimeClass' + set +o xtrace ----------------------------------------------------------------------------------- delete custom RuntimeClass ----------------------------------------------------------------------------------- + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2269/e2e-tests/conf/container-rc.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.g6z0MZu0rm ++ mktemp + local LAST_ERR=/tmp/tmp.9Uauu7vCML + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2269/e2e-tests/conf/container-rc.yaml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.g6z0MZu0rm runtimeclass.node.k8s.io "container-rc" deleted + cat /tmp/tmp.9Uauu7vCML + rm /tmp/tmp.g6z0MZu0rm /tmp/tmp.9Uauu7vCML + return 0 + destroy pitr-sharded-13591 + local namespace=pitr-sharded-13591 + local ignore_logs=true + [[ 0 == 1 ]] + desc 'destroy cluster/operator and all other resources' + set +o xtrace ----------------------------------------------------------------------------------- destroy cluster/operator and all other resources ----------------------------------------------------------------------------------- + '[' true == false ']' + delete_backups + desc 'Delete psmdb-backup' + set +o xtrace ----------------------------------------------------------------------------------- Delete psmdb-backup ----------------------------------------------------------------------------------- ++ kubectl_bin get psmdb-backup --no-headers ++ wc -l +++ mktemp ++ local LAST_OUT=/tmp/tmp.kp0t2DOpLS +++ mktemp ++ local LAST_ERR=/tmp/tmp.rVLK79uTuA ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb-backup --no-headers ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ 
break ++ cat /tmp/tmp.kp0t2DOpLS ++ cat /tmp/tmp.rVLK79uTuA ++ rm /tmp/tmp.kp0t2DOpLS /tmp/tmp.rVLK79uTuA ++ return 0 + '[' 2 '!=' 0 ']' + kubectl_bin get psmdb-backup ++ mktemp + local LAST_OUT=/tmp/tmp.44cMGJRwon ++ mktemp + local LAST_ERR=/tmp/tmp.LPaxhA39b4 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get psmdb-backup + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.44cMGJRwon NAME CLUSTER STORAGE DESTINATION TYPE SIZE STATUS COMPLETED AGE backup-minio-0 some-name minio s3://operator-testing/2026-04-23T09:59:30Z logical 157.03KB ready 13m 14m backup-minio-1 some-name minio s3://operator-testing/2026-04-23T10:06:56Z logical 181.08KB ready 6m23s 6m51s + cat /tmp/tmp.LPaxhA39b4 + rm /tmp/tmp.44cMGJRwon /tmp/tmp.LPaxhA39b4 + return 0 + kubectl_bin delete psmdb-backup --all ++ mktemp + local LAST_OUT=/tmp/tmp.JAXnVC2jqt ++ mktemp + local LAST_ERR=/tmp/tmp.JOl7sYelJe + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete psmdb-backup --all + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.JAXnVC2jqt perconaservermongodbbackup.psmdb.percona.com "backup-minio-0" deleted from pitr-sharded-13591 namespace perconaservermongodbbackup.psmdb.percona.com "backup-minio-1" deleted from pitr-sharded-13591 namespace + cat /tmp/tmp.JOl7sYelJe + rm /tmp/tmp.JAXnVC2jqt /tmp/tmp.JOl7sYelJe + return 0 + delete_crd + desc 'get and delete old CRDs and RBAC' + set +o xtrace ----------------------------------------------------------------------------------- get and delete old CRDs and RBAC ----------------------------------------------------------------------------------- + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2269/deploy/crd.yaml --ignore-not-found --wait=false ++ mktemp + local LAST_OUT=/tmp/tmp.FeGo9N6rLT ++ mktemp + local LAST_ERR=/tmp/tmp.jn50F22ycI + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2269/deploy/crd.yaml --ignore-not-found --wait=false + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.FeGo9N6rLT customresourcedefinition.apiextensions.k8s.io "perconaservermongodbbackups.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbrestores.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbs.psmdb.percona.com" deleted + cat /tmp/tmp.jn50F22ycI + rm /tmp/tmp.FeGo9N6rLT /tmp/tmp.jn50F22ycI + return 0 ++ yq eval .metadata.name /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2269/deploy/crd.yaml ++ grep -v '\-\-\-' grep: warning: stray \ before - grep: warning: stray \ before - + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + kubectl get perconaservermongodbbackups.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbbackups.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbbackups" + kubectl patch perconaservermongodbbackups.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbbackups" + : + kubectl_bin wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.myT8qZRauH ++ mktemp + local 
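# --- editor's note ---------------------------------------------------------
# delete_crd (above) walks every CRD named in deploy/crd.yaml, clears
# finalizers on any leftover custom resources so deletion cannot hang, then
# waits for the CRD itself to go away. Two defects are visible in the log:
# the "stray \ before -" warnings come from grep -v '\-\-\-' (grep -v -- '---'
# says the same thing cleanly), and xargs runs its command even on empty
# input, which is why a spurious "kubectl patch ... -n sh" appears ($0
# defaults to "sh"); GNU xargs -r would skip that run. Reconstructed shape:
delete_crd() {
    kubectl delete -f deploy/crd.yaml --ignore-not-found --wait=false
    for crd_name in $(yq eval '.metadata.name' deploy/crd.yaml | grep -v -- '---'); do
        kubectl get "$crd_name" --all-namespaces -o wide |
            grep -v NAMESPACE |
            xargs -r -L 1 sh -xc "kubectl patch $crd_name -n \$0 \$1 --type=merge -p '{\"metadata\":{\"finalizers\":[]}}'" || :
        kubectl wait --for=delete crd "$crd_name"
    done
}
# --------------------------------------------------------------------------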
LAST_ERR=/tmp/tmp.CoHVWorHfY + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.myT8qZRauH + cat /tmp/tmp.CoHVWorHfY + rm /tmp/tmp.myT8qZRauH /tmp/tmp.CoHVWorHfY + return 0 + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + kubectl get perconaservermongodbrestores.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbrestores.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbrestores" + kubectl patch perconaservermongodbrestores.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbrestores" + : + kubectl_bin wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.h57lR9LMCY ++ mktemp + local LAST_ERR=/tmp/tmp.B7vP1mk3JN + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.h57lR9LMCY + cat /tmp/tmp.B7vP1mk3JN + rm /tmp/tmp.h57lR9LMCY /tmp/tmp.B7vP1mk3JN + return 0 + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + kubectl get perconaservermongodbs.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbs.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbs" + kubectl patch perconaservermongodbs.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbs" + : + kubectl_bin wait --for=delete crd perconaservermongodbs.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.o2qIR1vnki ++ mktemp + local LAST_ERR=/tmp/tmp.Y4oPFj84BF + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbs.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.o2qIR1vnki + cat /tmp/tmp.Y4oPFj84BF + rm /tmp/tmp.o2qIR1vnki /tmp/tmp.Y4oPFj84BF + return 0 + local rbac_yaml=rbac.yaml + '[' -n psmdb-operator ']' + rbac_yaml=cw-rbac.yaml + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2269/deploy/cw-rbac.yaml --ignore-not-found ++ mktemp + local LAST_OUT=/tmp/tmp.jGRLii5sSm ++ mktemp + local LAST_ERR=/tmp/tmp.gNrfhLj0dA + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2269/deploy/cw-rbac.yaml --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.jGRLii5sSm clusterrole.rbac.authorization.k8s.io "percona-server-mongodb-operator" deleted clusterrolebinding.rbac.authorization.k8s.io "service-account-percona-server-mongodb-operator" deleted + cat /tmp/tmp.gNrfhLj0dA + rm /tmp/tmp.jGRLii5sSm /tmp/tmp.gNrfhLj0dA + return 0 + destroy_cert_manager + kubectl_bin delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml ++ mktemp + local 
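# --- editor's note ---------------------------------------------------------
# Every kubectl call in this log runs through the same wrapper: output and
# errors are captured to mktemp files and the command is retried up to three
# times; the failing cert-manager delete below is the only place the retry
# path actually engages. Reconstructed shape (backoff and cleanup details
# approximate what the trace shows):
kubectl_bin() {
    local LAST_OUT LAST_ERR exit_status=0 timeout=4
    LAST_OUT=$(mktemp)
    LAST_ERR=$(mktemp)
    for i in $(seq 0 2); do
        set +e
        kubectl "$@" >"$LAST_OUT" 2>"$LAST_ERR"
        exit_status=$?
        set -e
        if [ "$exit_status" != 0 ]; then
            cat "$LAST_OUT"
            cat "$LAST_ERR" >&2
            sleep $(( timeout * i ))   # expands to "sleep 0" on the first retry, per the log
            continue
        fi
        break
    done
    cat "$LAST_OUT"
    cat "$LAST_ERR" >&2
    rm -f "$LAST_OUT" "$LAST_ERR"
    return "$exit_status"
}
# --------------------------------------------------------------------------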
LAST_OUT=/tmp/tmp.DlQ9qCjDmV ++ mktemp + local LAST_ERR=/tmp/tmp.Vh9vDTpHC6 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.DlQ9qCjDmV + cat /tmp/tmp.Vh9vDTpHC6 Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": namespaces "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "challenges.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "orders.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificaterequests.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificates.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "clusterissuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "issuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": serviceaccounts "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": serviceaccounts "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": serviceaccounts "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": 
clusterroles.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cluster-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-edit" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server 
(NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": services "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": services "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": services "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": deployments.apps "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": deployments.apps "cert-manager" not found Error from server (NotFound): error when deleting 
"https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": deployments.apps "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": mutatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": validatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found + sleep 0 + for i in $(seq 0 2) + set +e + kubectl delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.DlQ9qCjDmV + cat /tmp/tmp.Vh9vDTpHC6 Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": namespaces "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "challenges.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "orders.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificaterequests.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificates.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "clusterissuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "issuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": serviceaccounts "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": serviceaccounts "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": serviceaccounts "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting 
"https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cluster-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-edit" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": 
clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": services "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": services "cert-manager" not found Error from server (NotFound): error when deleting 
"https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": services "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": deployments.apps "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": deployments.apps "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": deployments.apps "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": mutatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": validatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found + sleep 4 + for i in $(seq 0 2) + set +e + kubectl delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.DlQ9qCjDmV + cat /tmp/tmp.Vh9vDTpHC6 Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": namespaces "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "challenges.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "orders.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificaterequests.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificates.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "clusterissuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "issuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": serviceaccounts "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": serviceaccounts "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": serviceaccounts "cert-manager-webhook" not 
found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cluster-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-edit" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": 
clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found 
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": services "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": services "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": services "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": deployments.apps "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": deployments.apps "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": deployments.apps "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": mutatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": validatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found + sleep 8 + cat /tmp/tmp.DlQ9qCjDmV + cat /tmp/tmp.Vh9vDTpHC6 Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": namespaces "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "challenges.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "orders.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificaterequests.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificates.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "clusterissuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "issuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": serviceaccounts "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": 
serviceaccounts "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": serviceaccounts "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cluster-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-edit" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": 
clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-tokenrequest" not 
found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": services "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": services "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": services "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": deployments.apps "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": deployments.apps "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": deployments.apps "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": mutatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": validatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found + rm /tmp/tmp.DlQ9qCjDmV /tmp/tmp.Vh9vDTpHC6 + return 1 + true + '[' -n '' ']' + '[' -n psmdb-operator ']' + kubectl_bin delete --grace-period=0 --force=true namespace pitr-sharded-13591 + kubectl_bin delete --grace-period=0 --force=true namespace psmdb-operator + rm -rf /tmp/tmp.CfvgmtWHfZ ++ mktemp ++ mktemp + desc 'test passed' + set +o xtrace ----------------------------------------------------------------------------------- test passed ----------------------------------------------------------------------------------- + local LAST_OUT=/tmp/tmp.3bnI20hMcX ++ mktemp + local LAST_OUT=/tmp/tmp.9pLQSKVo6D + local LAST_ERR=/tmp/tmp.QZZUlk402D + local exit_status=0 + local timeout=4 ++ mktemp ++ seq 0 2 + local LAST_ERR=/tmp/tmp.0guHCGzbP2 + local exit_status=0 + local timeout=4 + for i in $(seq 0 2) + set +e + kubectl delete --grace-period=0 --force=true namespace psmdb-operator ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete --grace-period=0 --force=true namespace pitr-sharded-13591
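
Editor's note: the trace above (three attempts driven by seq 0 2, stdout/stderr captured to mktemp files, sleeps of 0, 4, and 8 seconds between attempts, and the last exit status returned) is consistent with a simple retry wrapper around kubectl. The sketch below is a hypothetical reconstruction inferred from the xtrace output only; the real kubectl_bin helper lives in the test suite's sources and may differ in detail.

  # Minimal sketch of a retry wrapper matching the logged behavior.
  # Assumptions: 3 attempts, back-off of timeout*i seconds (0s, 4s, 8s),
  # output dumped after every failed attempt and once more at the end.
  kubectl_bin() {
      local LAST_OUT
      LAST_OUT=$(mktemp)
      local LAST_ERR
      LAST_ERR=$(mktemp)
      local exit_status=0
      local timeout=4
      for i in $(seq 0 2); do
          set +e
          kubectl "$@" >"$LAST_OUT" 2>"$LAST_ERR"
          exit_status=$?
          set -e
          if [ "$exit_status" != 0 ]; then
              # Failed attempt: show what happened, then back off.
              cat "$LAST_OUT"
              cat "$LAST_ERR"
              sleep $((timeout * i))
          else
              break
          fi
      done
      cat "$LAST_OUT"
      cat "$LAST_ERR"
      rm "$LAST_OUT" "$LAST_ERR"
      return $exit_status
  }

Under this reading, the "+ return 1" above reports that all three delete attempts hit the same NotFound errors, and the "+ true" that follows suggests the caller deliberately tolerates that outcome (e.g. kubectl_bin ... || true), since absent cert-manager resources are expected during teardown.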