Log: /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2104/e2e-tests/logs/pitr.log Warning: version difference between client (1.34) and server (1.31) exceeds the supported minor version skew of +/-1 Warning: version difference between client (1.34) and server (1.31) exceeds the supported minor version skew of +/-1 Warning: version difference between client (1.34) and server (1.31) exceeds the supported minor version skew of +/-1 + main + create_infra pitr-30181 + local ns=pitr-30181 + [[ 1 == 1 ]] + delete_crd + desc 'get and delete old CRDs and RBAC' + set +o xtrace ----------------------------------------------------------------------------------- get and delete old CRDs and RBAC ----------------------------------------------------------------------------------- + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2104/deploy/crd.yaml --ignore-not-found --wait=false ++ mktemp + local LAST_OUT=/tmp/tmp.77hRwV7DhH ++ mktemp + local LAST_ERR=/tmp/tmp.TzhKGWqhEu + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2104/deploy/crd.yaml --ignore-not-found --wait=false + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.77hRwV7DhH customresourcedefinition.apiextensions.k8s.io "perconaservermongodbbackups.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbrestores.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbs.psmdb.percona.com" deleted + cat /tmp/tmp.TzhKGWqhEu + rm /tmp/tmp.77hRwV7DhH /tmp/tmp.TzhKGWqhEu + return 0 ++ yq eval .metadata.name /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2104/deploy/crd.yaml ++ grep -v '\-\-\-' grep: warning: stray \ before - grep: warning: stray \ before - + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + kubectl get perconaservermongodbbackups.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbbackups.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' No resources found + kubectl patch perconaservermongodbbackups.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: resource(s) were provided, but no name was specified + : + kubectl_bin wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.0wwd3fhk9O ++ mktemp + local LAST_ERR=/tmp/tmp.yOa446EDu2 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.0wwd3fhk9O + cat /tmp/tmp.yOa446EDu2 + rm /tmp/tmp.0wwd3fhk9O /tmp/tmp.yOa446EDu2 + return 0 + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + kubectl get perconaservermongodbrestores.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbrestores.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbrestores" + kubectl patch perconaservermongodbrestores.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbrestores" + : + kubectl_bin wait --for=delete crd 
perconaservermongodbrestores.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.0ya2ber0DI ++ mktemp + local LAST_ERR=/tmp/tmp.7bh54jvxNb + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.0ya2ber0DI + cat /tmp/tmp.7bh54jvxNb + rm /tmp/tmp.0ya2ber0DI /tmp/tmp.7bh54jvxNb + return 0 + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + kubectl get perconaservermongodbs.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbs.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbs" + kubectl patch perconaservermongodbs.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbs" + : + kubectl_bin wait --for=delete crd perconaservermongodbs.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.H01SXWkKvM ++ mktemp + local LAST_ERR=/tmp/tmp.tIGYb55BXs + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbs.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.H01SXWkKvM + cat /tmp/tmp.tIGYb55BXs + rm /tmp/tmp.H01SXWkKvM /tmp/tmp.tIGYb55BXs + return 0 + local rbac_yaml=rbac.yaml + '[' -n psmdb-operator ']' + rbac_yaml=cw-rbac.yaml + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2104/deploy/cw-rbac.yaml --ignore-not-found ++ mktemp + local LAST_OUT=/tmp/tmp.qcwsvNnrnJ ++ mktemp + local LAST_ERR=/tmp/tmp.zD6wvGY4gC + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2104/deploy/cw-rbac.yaml --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.qcwsvNnrnJ clusterrole.rbac.authorization.k8s.io "percona-server-mongodb-operator" deleted clusterrolebinding.rbac.authorization.k8s.io "service-account-percona-server-mongodb-operator" deleted + cat /tmp/tmp.zD6wvGY4gC + rm /tmp/tmp.qcwsvNnrnJ /tmp/tmp.zD6wvGY4gC + return 0 + check_crd_for_deletion PR-2104-591a1b50 + local git_tag=PR-2104-591a1b50 ++ curl -s https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/PR-2104-591a1b50/deploy/crd.yaml ++ yq eval .metadata.name ++ /usr/sbin/sed s/---//g ++ /usr/sbin/sed ':a;N;$!ba;s/\n/ /g' + for crd_name in $(curl -s https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/${git_tag}/deploy/crd.yaml | yq eval '.metadata.name' | $sed 's/---//g' | $sed ':a;N;$!ba;s/\n/ /g') ++ kubectl_bin get crd/null -o 'jsonpath={.status.conditions[-1].type}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.vYc4DJN9Pf +++ mktemp ++ local LAST_ERR=/tmp/tmp.33J1v5zGeu ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}' ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.vYc4DJN9Pf ++ cat /tmp/tmp.33J1v5zGeu Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ sleep 0 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}' ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 
-a -n 1 ']' ++ cat /tmp/tmp.vYc4DJN9Pf ++ cat /tmp/tmp.33J1v5zGeu Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ sleep 4 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}' ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.vYc4DJN9Pf ++ cat /tmp/tmp.33J1v5zGeu Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ sleep 8 ++ cat /tmp/tmp.vYc4DJN9Pf ++ cat /tmp/tmp.33J1v5zGeu Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ rm /tmp/tmp.vYc4DJN9Pf /tmp/tmp.33J1v5zGeu ++ return 1 + [[ '' == \T\e\r\m\i\n\a\t\i\n\g ]] + '[' -n psmdb-operator ']' + create_namespace psmdb-operator + local namespace=psmdb-operator + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + destroy_chaos_mesh ++ helm list --all-namespaces --filter chaos-mesh ++ awk '-F ' '{print $2}' ++ tail -n1 ++ sed s/NAMESPACE// + local chaos_mesh_ns= + desc 'destroy chaos-mesh' + set +o xtrace ----------------------------------------------------------------------------------- destroy chaos-mesh ----------------------------------------------------------------------------------- + '[' -n '' ']' ++ kubectl get MutatingWebhookConfiguration ++ awk '{print $1}' ++ grep chaos-mesh + timeout 30 kubectl delete MutatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep validate-auth ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl api-resources ++ grep chaos-mesh ++ awk '{print $1}' ++ kubectl get crd ++ grep chaos-mesh.org ++ awk '{print $1}' + timeout 30 kubectl delete crd error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrolebinding ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrolebinding error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrole ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrole error: resource(s) were provided, but no name was specified + : + desc 'cleaned up all old namespaces' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up all old namespaces ----------------------------------------------------------------------------------- + kubectl_bin get ns + grep -E -v '^kube-|^default|Terminating|psmdb-operator|openshift|^gke-|^gmp-|^NAME' + '[' -n '' ']' + desc 'cleaned up old namespaces psmdb-operator' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up old namespaces psmdb-operator ----------------------------------------------------------------------------------- + awk '{print$1}' + kubectl_bin delete namespace psmdb-operator --ignore-not-found + xargs kubectl delete ns ++ mktemp ++ mktemp + local LAST_OUT=/tmp/tmp.S7QrO5DK65 ++ mktemp + local LAST_OUT=/tmp/tmp.Xkkx1Sm83l ++ mktemp + local LAST_ERR=/tmp/tmp.Xb1Y2LOOeK + local exit_status=0 + local timeout=4 + local LAST_ERR=/tmp/tmp.CTyNEuWpng + local exit_status=0 + local timeout=4 ++ seq 0 2 ++ seq 0 2 + for i in $(seq 0 2) 
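(Editor's note: every kubectl_bin call in this trace follows the same retry scaffold — stdout and stderr captured into mktemp files, up to three attempts, the captured output replayed, then the temp files removed. A minimal sketch of that wrapper, reconstructed from the trace itself; the function body and the backoff formula are inferences from the logged "sleep 0 / sleep 4 / sleep 8" sequence, not copied from the test framework source:)

    kubectl_bin() {
        local LAST_OUT LAST_ERR exit_status=0 timeout=4
        LAST_OUT=$(mktemp)
        LAST_ERR=$(mktemp)
        for i in $(seq 0 2); do
            set +e
            kubectl "$@" >"$LAST_OUT" 2>"$LAST_ERR"
            exit_status=$?
            set -e
            # stop retrying as soon as kubectl succeeds
            if [ "$exit_status" -eq 0 ]; then
                break
            fi
            # linear backoff seen in the trace: 0s, then 4s, then 8s
            sleep $((timeout * i))
        done
        # replay the captured output, then clean up the temp files
        cat "$LAST_OUT"
        cat "$LAST_ERR" >&2
        rm "$LAST_OUT" "$LAST_ERR"
        return $exit_status
    }

This explains the /tmp/tmp.* pairs that appear before every command above: they are per-invocation capture files, not leftovers.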
+ set +e + kubectl get ns + for i in $(seq 0 2) + set +e + kubectl delete namespace psmdb-operator --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Xkkx1Sm83l + cat /tmp/tmp.CTyNEuWpng + rm /tmp/tmp.Xkkx1Sm83l /tmp/tmp.CTyNEuWpng + return 0 namespace "pitr-18992" deleted + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.S7QrO5DK65 namespace "psmdb-operator" deleted + cat /tmp/tmp.Xb1Y2LOOeK + rm /tmp/tmp.S7QrO5DK65 /tmp/tmp.Xb1Y2LOOeK + return 0 + kubectl_bin wait --for=delete namespace psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.Ti14tG5WP8 ++ mktemp + local LAST_ERR=/tmp/tmp.by3F8nVxfS + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete namespace psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Ti14tG5WP8 + cat /tmp/tmp.by3F8nVxfS + rm /tmp/tmp.Ti14tG5WP8 /tmp/tmp.by3F8nVxfS + return 0 + desc 'create namespace psmdb-operator' + set +o xtrace ----------------------------------------------------------------------------------- create namespace psmdb-operator ----------------------------------------------------------------------------------- + kubectl_bin create namespace psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.w5mGkolLQz ++ mktemp + local LAST_ERR=/tmp/tmp.6KnaFkzqGG + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl create namespace psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.w5mGkolLQz namespace/psmdb-operator created + cat /tmp/tmp.6KnaFkzqGG + rm /tmp/tmp.w5mGkolLQz /tmp/tmp.6KnaFkzqGG + return 0 + set_kube_ctx psmdb-operator + local namespace=psmdb-operator ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.Y2LVzDo9VG +++ mktemp ++ local LAST_ERR=/tmp/tmp.yKrFz2wglV ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Y2LVzDo9VG ++ cat /tmp/tmp.yKrFz2wglV ++ rm /tmp/tmp.Y2LVzDo9VG /tmp/tmp.yKrFz2wglV ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-2104-591a1b50-1-cluster6 --namespace=psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.K2Lims7uWf ++ mktemp + local LAST_ERR=/tmp/tmp.on9pD9SfUc + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-2104-591a1b50-1-cluster6 --namespace=psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.K2Lims7uWf Context "gke_cloud-dev-112233_us-central1-a_jen-psmdb-2104-591a1b50-1-cluster6" modified. 
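(Editor's note: the namespace churn in create_namespace/set_kube_ctx above reduces to a recreate-and-pin pattern: delete any leftover namespace, wait for it to disappear, create it fresh, and pin the active kubeconfig context to it. A sketch under the assumption that plain kubectl, without the retry wrapper, is acceptable; the --timeout flag and the "|| true" guard are additions for safety and do not appear in the trace, and the concurrent sweep of stale test namespaces via xargs is omitted:)

    recreate_namespace() {
        local ns="$1"
        # remove a leftover namespace from a previous run, if any
        kubectl delete namespace "$ns" --ignore-not-found
        kubectl wait --for=delete namespace "$ns" --timeout=120s || true
        kubectl create namespace "$ns"
        # pin the active context so later commands default to the new namespace
        kubectl config set-context "$(kubectl config current-context)" --namespace="$ns"
    }

The trace runs this sequence twice: once for the operator namespace (psmdb-operator) and again, further down, for the test namespace (pitr-30181).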
+ cat /tmp/tmp.on9pD9SfUc + rm /tmp/tmp.K2Lims7uWf /tmp/tmp.on9pD9SfUc + return 0 + deploy_operator + desc 'start PSMDB operator: perconalab/percona-server-mongodb-operator:PR-2104-591a1b50' + set +o xtrace ----------------------------------------------------------------------------------- start PSMDB operator: perconalab/percona-server-mongodb-operator:PR-2104-591a1b50 ----------------------------------------------------------------------------------- + local cr_file + '[' -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2104/e2e-tests/pitr/conf/crd.yaml ']' + cr_file=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2104/deploy/crd.yaml + kubectl_bin apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2104/deploy/crd.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.iwBRt9ZUkr ++ mktemp + local LAST_ERR=/tmp/tmp.1bOaEZbVkt + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2104/deploy/crd.yaml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.iwBRt9ZUkr customresourcedefinition.apiextensions.k8s.io/perconaservermongodbbackups.psmdb.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaservermongodbrestores.psmdb.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaservermongodbs.psmdb.percona.com serverside-applied + cat /tmp/tmp.1bOaEZbVkt + rm /tmp/tmp.iwBRt9ZUkr /tmp/tmp.1bOaEZbVkt + return 0 + '[' -n psmdb-operator ']' + apply_rbac cw-rbac + local operator_namespace=psmdb-operator + local rbac=cw-rbac + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2104/deploy/cw-rbac.yaml + sed -e 's^namespace: .*^namespace: psmdb-operator^' + kubectl_bin apply -n psmdb-operator -f - ++ mktemp + local LAST_OUT=/tmp/tmp.NCOolXqBJf ++ mktemp + local LAST_ERR=/tmp/tmp.gC4gpQfsnA + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -n psmdb-operator -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.NCOolXqBJf clusterrole.rbac.authorization.k8s.io/percona-server-mongodb-operator created serviceaccount/percona-server-mongodb-operator created clusterrolebinding.rbac.authorization.k8s.io/service-account-percona-server-mongodb-operator created + cat /tmp/tmp.gC4gpQfsnA + rm /tmp/tmp.NCOolXqBJf /tmp/tmp.gC4gpQfsnA + return 0 + yq eval ' (.spec.template.spec.containers[].image = "perconalab/percona-server-mongodb-operator:PR-2104-591a1b50") | ((.. | select(.[] == "DISABLE_TELEMETRY")) |= .value="true") | ((.. 
| select(.[] == "LOG_LEVEL")) |= .value="DEBUG")' /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2104/deploy/cw-operator.yaml + kubectl_bin apply -n psmdb-operator -f - ++ mktemp + local LAST_OUT=/tmp/tmp.ErjIUyEmWj ++ mktemp + local LAST_ERR=/tmp/tmp.u47srnoz4I + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -n psmdb-operator -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.ErjIUyEmWj deployment.apps/percona-server-mongodb-operator created + cat /tmp/tmp.u47srnoz4I + rm /tmp/tmp.ErjIUyEmWj /tmp/tmp.u47srnoz4I + return 0 + sleep 20 ++ get_operator_pod ++ kubectl_bin get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.CsoaBp0Fxo +++ mktemp ++ local LAST_ERR=/tmp/tmp.wHgjpZkCIL ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.CsoaBp0Fxo ++ cat /tmp/tmp.wHgjpZkCIL ++ rm /tmp/tmp.CsoaBp0Fxo /tmp/tmp.wHgjpZkCIL ++ return 0 + wait_operator_pod percona-server-mongodb-operator-6cd4b44944-vnct2 + local pod=percona-server-mongodb-operator-6cd4b44944-vnct2 + set +o xtrace waiting for pod/percona-server-mongodb-operator-6cd4b44944-vnct2 to be ready.OK + echo 'Print operator info from log' Print operator info from log + grep 'Manager starting up' ++ get_operator_pod ++ kubectl_bin get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.0XZLDBqHLB +++ mktemp ++ local LAST_ERR=/tmp/tmp.iM83JKcwtF ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.0XZLDBqHLB ++ cat /tmp/tmp.iM83JKcwtF ++ rm /tmp/tmp.0XZLDBqHLB /tmp/tmp.iM83JKcwtF ++ return 0 + kubectl_bin logs -n psmdb-operator percona-server-mongodb-operator-6cd4b44944-vnct2 ++ mktemp + local LAST_OUT=/tmp/tmp.yY9CTevLFI ++ mktemp + local LAST_ERR=/tmp/tmp.rjc4db65C4 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl logs -n psmdb-operator percona-server-mongodb-operator-6cd4b44944-vnct2 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.yY9CTevLFI + cat /tmp/tmp.rjc4db65C4 + rm /tmp/tmp.yY9CTevLFI /tmp/tmp.rjc4db65C4 + return 0 2025-11-04T22:17:13.590Z INFO setup Manager starting up {"gitCommit": "591a1b50aa738bd0b38117b064923b562d47965f", "gitBranch": "PR-2104-591a1b50", "buildTime": "", "goVersion": "go1.25.3", "os": "linux", "arch": "amd64"} + create_namespace pitr-30181 + local namespace=pitr-30181 + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + destroy_chaos_mesh ++ tail -n1 ++ helm list --all-namespaces --filter chaos-mesh ++ awk '-F ' '{print $2}' ++ sed s/NAMESPACE// + local chaos_mesh_ns= + desc 'destroy chaos-mesh' + set +o xtrace ----------------------------------------------------------------------------------- destroy chaos-mesh ----------------------------------------------------------------------------------- + '[' -n '' ']' ++ kubectl get MutatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 
30 kubectl delete MutatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep validate-auth ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl api-resources ++ grep chaos-mesh ++ awk '{print $1}' ++ kubectl get crd ++ grep chaos-mesh.org ++ awk '{print $1}' + timeout 30 kubectl delete crd error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrolebinding ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrolebinding error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrole ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrole error: resource(s) were provided, but no name was specified + : + desc 'cleaned up all old namespaces' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up all old namespaces ----------------------------------------------------------------------------------- + kubectl_bin get ns + '[' -n '' ']' + awk '{print$1}' + grep -E -v '^kube-|^default|Terminating|psmdb-operator|openshift|^gke-|^gmp-|^NAME' + desc 'cleaned up old namespaces pitr-30181' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up old namespaces pitr-30181 ----------------------------------------------------------------------------------- + xargs kubectl delete ns + kubectl_bin delete namespace pitr-30181 --ignore-not-found ++ mktemp ++ mktemp + local LAST_OUT=/tmp/tmp.duk1e2GREu ++ mktemp + local LAST_OUT=/tmp/tmp.raLfyxYmlB + local LAST_ERR=/tmp/tmp.WQ7aK4NJNL + local exit_status=0 + local timeout=4 ++ mktemp ++ seq 0 2 + local LAST_ERR=/tmp/tmp.eD8L4FwlX6 + local exit_status=0 + local timeout=4 + for i in $(seq 0 2) + set +e + kubectl get ns ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete namespace pitr-30181 --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.raLfyxYmlB + cat /tmp/tmp.eD8L4FwlX6 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + rm /tmp/tmp.raLfyxYmlB /tmp/tmp.eD8L4FwlX6 + break + cat /tmp/tmp.duk1e2GREu + return 0 + kubectl_bin wait --for=delete namespace pitr-30181 + cat /tmp/tmp.WQ7aK4NJNL ++ mktemp + rm /tmp/tmp.duk1e2GREu /tmp/tmp.WQ7aK4NJNL + local LAST_OUT=/tmp/tmp.2x3xegrt0n ++ mktemp + return 0 + local LAST_ERR=/tmp/tmp.6VfkLXUY33 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete namespace pitr-30181 error: resource(s) were provided, but no name was specified + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.2x3xegrt0n + cat /tmp/tmp.6VfkLXUY33 + rm /tmp/tmp.2x3xegrt0n /tmp/tmp.6VfkLXUY33 + return 0 + desc 'create namespace pitr-30181' + set +o xtrace ----------------------------------------------------------------------------------- create namespace pitr-30181 ----------------------------------------------------------------------------------- + kubectl_bin create namespace pitr-30181 ++ mktemp + local LAST_OUT=/tmp/tmp.fa1VscKWQN ++ mktemp + local LAST_ERR=/tmp/tmp.AGzNZUYC4w + local exit_status=0 + local timeout=4 ++ seq 
0 2 + for i in $(seq 0 2) + set +e + kubectl create namespace pitr-30181 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.fa1VscKWQN namespace/pitr-30181 created + cat /tmp/tmp.AGzNZUYC4w + rm /tmp/tmp.fa1VscKWQN /tmp/tmp.AGzNZUYC4w + return 0 + set_kube_ctx pitr-30181 + local namespace=pitr-30181 ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.9KEnLOOLEv +++ mktemp ++ local LAST_ERR=/tmp/tmp.ss69cRSX7O ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.9KEnLOOLEv ++ cat /tmp/tmp.ss69cRSX7O ++ rm /tmp/tmp.9KEnLOOLEv /tmp/tmp.ss69cRSX7O ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-2104-591a1b50-1-cluster6 --namespace=pitr-30181 ++ mktemp + local LAST_OUT=/tmp/tmp.elsxu4B9no ++ mktemp + local LAST_ERR=/tmp/tmp.9MAUg2TAjW + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-2104-591a1b50-1-cluster6 --namespace=pitr-30181 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.elsxu4B9no Context "gke_cloud-dev-112233_us-central1-a_jen-psmdb-2104-591a1b50-1-cluster6" modified. + cat /tmp/tmp.9MAUg2TAjW + rm /tmp/tmp.elsxu4B9no /tmp/tmp.9MAUg2TAjW + return 0 + deploy_minio + desc 'install Minio' + set +o xtrace ----------------------------------------------------------------------------------- install Minio ----------------------------------------------------------------------------------- + helm uninstall minio-service Error: uninstall: Release not loaded: minio-service: release: not found + : + helm repo remove minio "minio" has been removed from your repositories + helm repo add minio https://charts.min.io/ "minio" has been added to your repositories + retry 10 60 helm install minio-service --version 5.4.0 --set replicas=1 --set mode=standalone --set resources.requests.memory=256Mi --set rootUser=rootuser --set rootPassword=rootpass123 --set 'users[0].accessKey=some-access-key' --set 'users[0].secretKey=some-secret-key' --set 'users[0].policy=consoleAdmin' --set service.type=ClusterIP --set configPathmc=/tmp/.minio/ --set persistence.size=2G --set securityContext.enabled=false minio/minio + local max=10 + local delay=60 + shift 2 + local n=1 + helm install minio-service --version 5.4.0 --set replicas=1 --set mode=standalone --set resources.requests.memory=256Mi --set rootUser=rootuser --set rootPassword=rootpass123 --set 'users[0].accessKey=some-access-key' --set 'users[0].secretKey=some-secret-key' --set 'users[0].policy=consoleAdmin' --set service.type=ClusterIP --set configPathmc=/tmp/.minio/ --set persistence.size=2G --set securityContext.enabled=false minio/minio NAME: minio-service LAST DEPLOYED: Tue Nov 4 22:17:46 2025 NAMESPACE: pitr-30181 STATUS: deployed REVISION: 1 TEST SUITE: None NOTES: MinIO can be accessed via port 9000 on the following DNS name from within your cluster: minio-service.pitr-30181.cluster.local To access MinIO from localhost, run the below commands: 1. export POD_NAME=$(kubectl get pods --namespace pitr-30181 -l "release=minio-service" -o jsonpath="{.items[0].metadata.name}") 2. kubectl port-forward $POD_NAME 9000 --namespace pitr-30181 Read more about port forwarding here: http://kubernetes.io/docs/user-guide/kubectl/kubectl_port-forward/ You can now access MinIO server on http://localhost:9000. 
Follow the below steps to connect to MinIO server with mc client: 1. Download the MinIO mc client - https://min.io/docs/minio/linux/reference/minio-mc.html#quickstart 2. export MC_HOST_minio-service-local=http://$(kubectl get secret --namespace pitr-30181 minio-service -o jsonpath="{.data.rootUser}" | base64 --decode):$(kubectl get secret --namespace pitr-30181 minio-service -o jsonpath="{.data.rootPassword}" | base64 --decode)@localhost:9000 3. mc ls minio-service-local ++ kubectl_bin get pods --selector=release=minio-service -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.JfhhRFmqyl +++ mktemp ++ local LAST_ERR=/tmp/tmp.ea45v9JD59 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=release=minio-service -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.JfhhRFmqyl ++ cat /tmp/tmp.ea45v9JD59 ++ rm /tmp/tmp.JfhhRFmqyl /tmp/tmp.ea45v9JD59 ++ return 0 + MINIO_POD=minio-service-d9589b474-8rjzb + wait_pod minio-service-d9589b474-8rjzb + local pod=minio-service-d9589b474-8rjzb + set +o xtrace waiting for pod/minio-service-d9589b474-8rjzb to be ready.OK + '[' -n psmdb-operator ']' + kubectl_bin create svc -n psmdb-operator externalname minio-service --external-name=minio-service.pitr-30181.svc.cluster.local --tcp=9000 ++ mktemp + local LAST_OUT=/tmp/tmp.B7bEiAaUPV ++ mktemp + local LAST_ERR=/tmp/tmp.WGcdxxKiTg + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl create svc -n psmdb-operator externalname minio-service --external-name=minio-service.pitr-30181.svc.cluster.local --tcp=9000 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.B7bEiAaUPV service/minio-service created + cat /tmp/tmp.WGcdxxKiTg + rm /tmp/tmp.B7bEiAaUPV /tmp/tmp.WGcdxxKiTg + return 0 + create_minio_bucket operator-testing + local bucket=operator-testing + kubectl_bin run -i --rm aws-cli --image=perconalab/awscli --restart=Never -- bash -c 'AWS_ACCESS_KEY_ID=some-access-key AWS_SECRET_ACCESS_KEY=some-secret-key AWS_DEFAULT_REGION=us-east-1 /usr/bin/aws --endpoint-url http://minio-service:9000 s3 mb s3://operator-testing' ++ mktemp + local LAST_OUT=/tmp/tmp.CEpnh6V39q ++ mktemp + local LAST_ERR=/tmp/tmp.MKa1zeJVGU + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl run -i --rm aws-cli --image=perconalab/awscli --restart=Never -- bash -c 'AWS_ACCESS_KEY_ID=some-access-key AWS_SECRET_ACCESS_KEY=some-secret-key AWS_DEFAULT_REGION=us-east-1 /usr/bin/aws --endpoint-url http://minio-service:9000 s3 mb s3://operator-testing' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.CEpnh6V39q pod "aws-cli" deleted from pitr-30181 namespace + cat /tmp/tmp.MKa1zeJVGU All commands and output from this session will be recorded in container logs, including credentials and sensitive information passed through the command prompt. If you don't see a command prompt, try pressing enter. 
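(Editor's note: the operator-testing bucket is created from a disposable in-cluster pod so that MinIO is reachable by its cluster-internal service name; the access keys shown are test fixtures already visible in the trace, not real credentials. A sketch of that step, mirroring the kubectl run invocation above:)

    create_minio_bucket() {
        local bucket="$1"
        # one-shot helper pod; --rm removes it as soon as the command exits
        kubectl run -i --rm aws-cli --image=perconalab/awscli --restart=Never -- \
            bash -c "AWS_ACCESS_KEY_ID=some-access-key \
                     AWS_SECRET_ACCESS_KEY=some-secret-key \
                     AWS_DEFAULT_REGION=us-east-1 \
                     /usr/bin/aws --endpoint-url http://minio-service:9000 s3 mb s3://$bucket"
    }

Note the stderr warning captured below: kubectl itself flags that credentials passed inline like this end up in container logs, which is tolerable here only because they are throwaway test fixtures.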
+ rm /tmp/tmp.CEpnh6V39q /tmp/tmp.MKa1zeJVGU + return 0 + desc 'create secrets and start client' + set +o xtrace ----------------------------------------------------------------------------------- create secrets and start client ----------------------------------------------------------------------------------- + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2104/e2e-tests/conf/secrets.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2104/e2e-tests/conf/client.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2104/e2e-tests/conf/minio-secret.yml ++ mktemp + local LAST_OUT=/tmp/tmp.UiRdw0vIQw ++ mktemp + local LAST_ERR=/tmp/tmp.UGPSHxhvAE + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2104/e2e-tests/conf/secrets.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2104/e2e-tests/conf/client.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2104/e2e-tests/conf/minio-secret.yml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.UiRdw0vIQw secret/some-users created deployment.apps/psmdb-client created secret/minio-secret created + cat /tmp/tmp.UGPSHxhvAE + rm /tmp/tmp.UiRdw0vIQw /tmp/tmp.UGPSHxhvAE + return 0 + cluster=some-name-rs0 + desc 'create first PSMDB cluster some-name-rs0' + set +o xtrace ----------------------------------------------------------------------------------- create first PSMDB cluster some-name-rs0 ----------------------------------------------------------------------------------- + apply_cluster /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2104/e2e-tests/pitr/conf/some-name-rs0.yml + '[' -z '' ']' + cat_config /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2104/e2e-tests/pitr/conf/some-name-rs0.yml + kubectl_bin apply -f - + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2104/e2e-tests/pitr/conf/some-name-rs0.yml + yq eval '(.spec | select(.image == null)).image = "perconalab/percona-server-mongodb-operator:main-mongod8.0"' ++ mktemp + yq eval '(.spec | select(has("pmm"))).pmm.image = "percona/pmm-client:2.44.1-1"' + yq eval '(.spec | select(has("initImage"))).initImage = "perconalab/percona-server-mongodb-operator:PR-2104-591a1b50"' + yq eval '(.spec | select(has("backup"))).backup.image = "perconalab/percona-server-mongodb-operator:main-backup"' + yq eval '.spec.upgradeOptions.apply="Never"' + local LAST_OUT=/tmp/tmp.77ySo0DF7o ++ mktemp + local LAST_ERR=/tmp/tmp.6VonQhdfl5 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.77ySo0DF7o perconaservermongodb.psmdb.percona.com/some-name created + cat /tmp/tmp.6VonQhdfl5 + rm /tmp/tmp.77ySo0DF7o /tmp/tmp.6VonQhdfl5 + return 0 + desc 'check if all 3 Pods started' + set +o xtrace ----------------------------------------------------------------------------------- check if all 3 Pods started ----------------------------------------------------------------------------------- + wait_for_running some-name-rs0 3 + local name=some-name-rs0 + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=some-name ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod some-name-rs0-0 + local pod=some-name-rs0-0 + set +o xtrace waiting for pod/some-name-rs0-0 to be ready................OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod some-name-rs0-1 + 
local pod=some-name-rs0-1 + set +o xtrace waiting for pod/some-name-rs0-1 to be ready........OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.P6JoDZGfaO +++ mktemp ++ local LAST_ERR=/tmp/tmp.dUbkEtAuD8 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.P6JoDZGfaO ++ cat /tmp/tmp.dUbkEtAuD8 ++ rm /tmp/tmp.P6JoDZGfaO /tmp/tmp.dUbkEtAuD8 ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-rs0-2 + local pod=some-name-rs0-2 + set +o xtrace waiting for pod/some-name-rs0-2 to be ready........OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.HM0OOmSiC5 +++ mktemp ++ local LAST_ERR=/tmp/tmp.3wEJGhqBMc ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.HM0OOmSiC5 ++ cat /tmp/tmp.3wEJGhqBMc ++ rm /tmp/tmp.HM0OOmSiC5 /tmp/tmp.3wEJGhqBMc ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.0eDKCFBe9m +++ mktemp ++ local LAST_ERR=/tmp/tmp.cLFcfddOi7 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.0eDKCFBe9m ++ cat /tmp/tmp.cLFcfddOi7 ++ rm /tmp/tmp.0eDKCFBe9m /tmp/tmp.cLFcfddOi7 ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness... + desc 'check if statefulset created with expected config' + set +o xtrace ----------------------------------------------------------------------------------- check if statefulset created with expected config ----------------------------------------------------------------------------------- + compare_kubectl statefulset/some-name-rs0 + local resource=statefulset/some-name-rs0 + local postfix= + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2104/e2e-tests/pitr/compare/statefulset_some-name-rs0.yml + local new_result=/tmp/tmp.wGkpxEBpK1/statefulset_some-name-rs0.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2104/e2e-tests/pitr/compare/statefulset_some-name-rs0-oc.yml ']' + kubectl_bin get -o yaml statefulset/some-name-rs0 + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. 
| select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("pitr-30181", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. == "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.rfFdyqfPtG ++ mktemp + local LAST_ERR=/tmp/tmp.XgZRAS9tcE + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get -o yaml statefulset/some-name-rs0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.rfFdyqfPtG + cat /tmp/tmp.XgZRAS9tcE + rm /tmp/tmp.rfFdyqfPtG /tmp/tmp.XgZRAS9tcE + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.wGkpxEBpK1/statefulset_some-name-rs0.yml + version_gt 1.22 ++ echo '1.31 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.wGkpxEBpK1/statefulset_some-name-rs0.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.wGkpxEBpK1/statefulset_some-name-rs0.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2104/e2e-tests/pitr/compare/statefulset_some-name-rs0.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2104/e2e-tests/pitr/compare/statefulset_some-name-rs0.yml /tmp/tmp.wGkpxEBpK1/statefulset_some-name-rs0.yml + log 'compare_kubectl: statefulset/some-name-rs0 OK' + set +o xtrace [2025-11-04T22:20:20+0000] compare_kubectl: statefulset/some-name-rs0 OK + write_initial_data + desc 'create user myApp' + set +o xtrace ----------------------------------------------------------------------------------- create user myApp ----------------------------------------------------------------------------------- + run_mongo 'db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})' userAdmin:userAdmin123456@some-name-rs0.pitr-30181 + local 'command=db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})' + local uri=userAdmin:userAdmin123456@some-name-rs0.pitr-30181 + local driver=mongodb+srv + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local 
LAST_OUT=/tmp/tmp.Y683CDWBJX +++ mktemp ++ local LAST_ERR=/tmp/tmp.woiBK5FpAh ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Y683CDWBJX ++ cat /tmp/tmp.woiBK5FpAh ++ rm /tmp/tmp.Y683CDWBJX /tmp/tmp.woiBK5FpAh ++ return 0 + local client_container=psmdb-client-696897d69b-d5dfh + local mongo_flag= + [[ userAdmin:userAdmin123456@some-name-rs0.pitr-30181 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-696897d69b-d5dfh -- bash -c 'printf '\''db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})\n'\'' | mongo mongodb+srv://userAdmin:userAdmin123456@some-name-rs0.pitr-30181.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.yklCvVTn18 ++ mktemp + local LAST_ERR=/tmp/tmp.NJmqIxE3bB + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-d5dfh -- bash -c 'printf '\''db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})\n'\'' | mongo mongodb+srv://userAdmin:userAdmin123456@some-name-rs0.pitr-30181.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.yklCvVTn18 Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-rs0-1.some-name-rs0.pitr-30181.svc.cluster.local:27017,some-name-rs0-2.some-name-rs0.pitr-30181.svc.cluster.local:27017,some-name-rs0-0.some-name-rs0.pitr-30181.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("39a5223a-0c4b-4571-9a59-5d730146a198") } Percona Server for MongoDB server version: v8.0.12-4 WARNING: shell and server versions do not match Successfully added user: { "user" : "myApp", "roles" : [ { "db" : "myApp", "role" : "readWrite" } ] } bye + cat /tmp/tmp.NJmqIxE3bB + rm /tmp/tmp.yklCvVTn18 /tmp/tmp.NJmqIxE3bB + return 0 + sleep 2 + write_document + local cmp_postfix= + local cluster_name=some-name-rs0 + desc 'write initial data, read from all' + set +o xtrace ----------------------------------------------------------------------------------- write initial data, read from all ----------------------------------------------------------------------------------- + run_mongo 'use myApp\n db.test.insert({ x: 100500 })' myApp:myPass@some-name-rs0.pitr-30181 + local 'command=use myApp\n db.test.insert({ x: 100500 })' + local uri=myApp:myPass@some-name-rs0.pitr-30181 + local driver=mongodb+srv + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.nGSseUcZKr +++ mktemp ++ local LAST_ERR=/tmp/tmp.S8mgEuj7eH ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.nGSseUcZKr ++ cat /tmp/tmp.S8mgEuj7eH ++ rm /tmp/tmp.nGSseUcZKr /tmp/tmp.S8mgEuj7eH ++ return 0 + local client_container=psmdb-client-696897d69b-d5dfh + local mongo_flag= + [[ myApp:myPass@some-name-rs0.pitr-30181 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-696897d69b-d5dfh -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' 
| mongo mongodb+srv://myApp:myPass@some-name-rs0.pitr-30181.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.arXwDiEcdM ++ mktemp + local LAST_ERR=/tmp/tmp.BYBwZwdPfn + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-d5dfh -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.pitr-30181.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.arXwDiEcdM Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-rs0-0.some-name-rs0.pitr-30181.svc.cluster.local:27017,some-name-rs0-1.some-name-rs0.pitr-30181.svc.cluster.local:27017,some-name-rs0-2.some-name-rs0.pitr-30181.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("bc77fccc-3ed0-47a1-af6e-99a41da4c36a") } Percona Server for MongoDB server version: v8.0.12-4 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.BYBwZwdPfn + rm /tmp/tmp.arXwDiEcdM /tmp/tmp.BYBwZwdPfn + return 0 + minikube_sleep + sleep_time=10 + [[ '' == 1 ]] + compare_mongo_cmd find myApp:myPass@some-name-rs0-0.some-name-rs0.pitr-30181 '' + local command=find + local uri=myApp:myPass@some-name-rs0-0.some-name-rs0.pitr-30181 + local postfix= + local suffix= + local database=myApp + local collection=test + local sort= + local tls=false + local 'full_command=db.test.find()' + [[ -n '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-11-04T22:20:28+0000] running db.test.find() in myApp + [[ false == \t\r\u\e ]] + mongo_command=run_mongo + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-0.some-name-rs0.pitr-30181 mongodb '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-0.some-name-rs0.pitr-30181 + local driver=mongodb + local suffix=.svc.cluster.local + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' + grep -E -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.miQZAJb37J +++ mktemp ++ local LAST_ERR=/tmp/tmp.zsw97uNVHi ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.miQZAJb37J ++ cat /tmp/tmp.zsw97uNVHi ++ rm /tmp/tmp.miQZAJb37J /tmp/tmp.zsw97uNVHi ++ return 0 + local client_container=psmdb-client-696897d69b-d5dfh + local mongo_flag= + [[ myApp:myPass@some-name-rs0-0.some-name-rs0.pitr-30181 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-696897d69b-d5dfh -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-0.some-name-rs0.pitr-30181.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.ZJ9Tb2WM9T ++ mktemp + local LAST_ERR=/tmp/tmp.xZLUhrfbMd + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec 
psmdb-client-696897d69b-d5dfh -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-0.some-name-rs0.pitr-30181.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.ZJ9Tb2WM9T + cat /tmp/tmp.xZLUhrfbMd + rm /tmp/tmp.ZJ9Tb2WM9T /tmp/tmp.xZLUhrfbMd + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2104/e2e-tests/pitr/compare/find.json /tmp/tmp.wGkpxEBpK1/find + compare_mongo_cmd find myApp:myPass@some-name-rs0-1.some-name-rs0.pitr-30181 '' + local command=find + local uri=myApp:myPass@some-name-rs0-1.some-name-rs0.pitr-30181 + local postfix= + local suffix= + local database=myApp + local collection=test + local sort= + local tls=false + local 'full_command=db.test.find()' + [[ -n '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-11-04T22:20:30+0000] running db.test.find() in myApp + [[ false == \t\r\u\e ]] + mongo_command=run_mongo + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-1.some-name-rs0.pitr-30181 mongodb '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-1.some-name-rs0.pitr-30181 + local driver=mongodb + local suffix=.svc.cluster.local + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' + grep -E -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.8HVyJxdmQk +++ mktemp ++ local LAST_ERR=/tmp/tmp.HWUrzKDhpJ ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.8HVyJxdmQk ++ cat /tmp/tmp.HWUrzKDhpJ ++ rm /tmp/tmp.8HVyJxdmQk /tmp/tmp.HWUrzKDhpJ ++ return 0 + local client_container=psmdb-client-696897d69b-d5dfh + local mongo_flag= + [[ myApp:myPass@some-name-rs0-1.some-name-rs0.pitr-30181 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-696897d69b-d5dfh -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-1.some-name-rs0.pitr-30181.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.cO1SKC9yPA ++ mktemp + local LAST_ERR=/tmp/tmp.rRDmyyQeA6 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-d5dfh -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-1.some-name-rs0.pitr-30181.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.cO1SKC9yPA + cat /tmp/tmp.rRDmyyQeA6 + rm /tmp/tmp.cO1SKC9yPA /tmp/tmp.rRDmyyQeA6 + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2104/e2e-tests/pitr/compare/find.json /tmp/tmp.wGkpxEBpK1/find + compare_mongo_cmd find myApp:myPass@some-name-rs0-2.some-name-rs0.pitr-30181 '' + local command=find + local uri=myApp:myPass@some-name-rs0-2.some-name-rs0.pitr-30181 + local postfix= + local suffix= + local database=myApp + local collection=test + local sort= + local tls=false + local 
'full_command=db.test.find()' + [[ -n '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-11-04T22:20:33+0000] running db.test.find() in myApp + [[ false == \t\r\u\e ]] + mongo_command=run_mongo + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-2.some-name-rs0.pitr-30181 mongodb '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-2.some-name-rs0.pitr-30181 + local driver=mongodb + local suffix=.svc.cluster.local + grep -E -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' +++ mktemp ++ local LAST_OUT=/tmp/tmp.pmZ9f2yjTp +++ mktemp ++ local LAST_ERR=/tmp/tmp.Jw0eZIVkN8 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.pmZ9f2yjTp ++ cat /tmp/tmp.Jw0eZIVkN8 ++ rm /tmp/tmp.pmZ9f2yjTp /tmp/tmp.Jw0eZIVkN8 ++ return 0 + local client_container=psmdb-client-696897d69b-d5dfh + local mongo_flag= + [[ myApp:myPass@some-name-rs0-2.some-name-rs0.pitr-30181 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-696897d69b-d5dfh -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-2.some-name-rs0.pitr-30181.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.xCf7ojRwG4 ++ mktemp + local LAST_ERR=/tmp/tmp.QIuXj6fsWm + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-d5dfh -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-2.some-name-rs0.pitr-30181.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.xCf7ojRwG4 + cat /tmp/tmp.QIuXj6fsWm + rm /tmp/tmp.xCf7ojRwG4 /tmp/tmp.QIuXj6fsWm + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2104/e2e-tests/pitr/compare/find.json /tmp/tmp.wGkpxEBpK1/find + wait_backup_agent some-name-rs0-0 + local agent_pod=some-name-rs0-0 + set +o xtrace waiting for pbm-agent to be ready in some-name-rs0-0...2025-11-04T22:20:11.000+0000 I listening for the commands + wait_backup_agent some-name-rs0-1 + local agent_pod=some-name-rs0-1 + set +o xtrace waiting for pbm-agent to be ready in some-name-rs0-1...2025-11-04T22:20:11.000+0000 I listening for the commands + wait_backup_agent some-name-rs0-2 + local agent_pod=some-name-rs0-2 + set +o xtrace waiting for pbm-agent to be ready in some-name-rs0-2...2025-11-04T22:20:17.000+0000 I listening for the commands + backup_name_minio=backup-minio + run_mongo 'use myApp\n db.test2.insert({ x: 100501 })' myApp:myPass@some-name-rs0.pitr-30181 + local 'command=use myApp\n db.test2.insert({ x: 100501 })' + local uri=myApp:myPass@some-name-rs0.pitr-30181 + local driver=mongodb+srv + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.CPKB4ieSPZ +++ mktemp ++ local LAST_ERR=/tmp/tmp.92EcYLsECs ++ local 
exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.CPKB4ieSPZ ++ cat /tmp/tmp.92EcYLsECs ++ rm /tmp/tmp.CPKB4ieSPZ /tmp/tmp.92EcYLsECs ++ return 0 + local client_container=psmdb-client-696897d69b-d5dfh + local mongo_flag= + [[ myApp:myPass@some-name-rs0.pitr-30181 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-696897d69b-d5dfh -- bash -c 'printf '\''use myApp\n db.test2.insert({ x: 100501 })\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.pitr-30181.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.b1OL2yrBjx ++ mktemp + local LAST_ERR=/tmp/tmp.GnA9XebseY + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-d5dfh -- bash -c 'printf '\''use myApp\n db.test2.insert({ x: 100501 })\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.pitr-30181.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.b1OL2yrBjx Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-rs0-2.some-name-rs0.pitr-30181.svc.cluster.local:27017,some-name-rs0-0.some-name-rs0.pitr-30181.svc.cluster.local:27017,some-name-rs0-1.some-name-rs0.pitr-30181.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("f2470203-8882-4fe3-a98d-35fd4849133c") } Percona Server for MongoDB server version: v8.0.12-4 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.GnA9XebseY + rm /tmp/tmp.b1OL2yrBjx /tmp/tmp.GnA9XebseY + return 0 + run_backup backup-minio 0 + local name=backup-minio + local idx=0 + desc 'run backup backup-minio-0' + set +o xtrace ----------------------------------------------------------------------------------- run backup backup-minio-0 ----------------------------------------------------------------------------------- + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2104/e2e-tests/pitr/conf/backup-minio.yml + /usr/sbin/sed -e 's/name:/name: backup-minio-0/' + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.vGecsAHCOH ++ mktemp + local LAST_ERR=/tmp/tmp.K1KhdZFLHb + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.vGecsAHCOH perconaservermongodbbackup.psmdb.percona.com/backup-minio-0 created + cat /tmp/tmp.K1KhdZFLHb + rm /tmp/tmp.vGecsAHCOH /tmp/tmp.K1KhdZFLHb + return 0 + wait_backup backup-minio-0 + local backup_name=backup-minio-0 + local target_state=ready + set +o xtrace waiting for backup-minio-0 to reach ready state.......OK + write_document -2nd + local cmp_postfix=-2nd + local cluster_name=some-name-rs0 + desc 'write initial data, read from all' + set +o xtrace ----------------------------------------------------------------------------------- write initial data, read from all ----------------------------------------------------------------------------------- + run_mongo 'use myApp\n db.test.insert({ x: 100500 })' myApp:myPass@some-name-rs0.pitr-30181 + local 'command=use myApp\n db.test.insert({ x: 100500 })' + local uri=myApp:myPass@some-name-rs0.pitr-30181 + local driver=mongodb+srv + local 
suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.qaGL5LdVcU +++ mktemp ++ local LAST_ERR=/tmp/tmp.4g6IG7TjZk ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.qaGL5LdVcU ++ cat /tmp/tmp.4g6IG7TjZk ++ rm /tmp/tmp.qaGL5LdVcU /tmp/tmp.4g6IG7TjZk ++ return 0 + local client_container=psmdb-client-696897d69b-d5dfh + local mongo_flag= + [[ myApp:myPass@some-name-rs0.pitr-30181 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-696897d69b-d5dfh -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.pitr-30181.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.36r06ngq7a ++ mktemp + local LAST_ERR=/tmp/tmp.ebr0CPIR5g + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-d5dfh -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.pitr-30181.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.36r06ngq7a Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-rs0-0.some-name-rs0.pitr-30181.svc.cluster.local:27017,some-name-rs0-1.some-name-rs0.pitr-30181.svc.cluster.local:27017,some-name-rs0-2.some-name-rs0.pitr-30181.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("d75aaec4-1fc0-4ebf-afbe-4d3cf1a1396e") } Percona Server for MongoDB server version: v8.0.12-4 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.ebr0CPIR5g + rm /tmp/tmp.36r06ngq7a /tmp/tmp.ebr0CPIR5g + return 0 + minikube_sleep + sleep_time=10 + [[ '' == 1 ]] + compare_mongo_cmd find myApp:myPass@some-name-rs0-0.some-name-rs0.pitr-30181 -2nd + local command=find + local uri=myApp:myPass@some-name-rs0-0.some-name-rs0.pitr-30181 + local postfix=-2nd + local suffix= + local database=myApp + local collection=test + local sort= + local tls=false + local 'full_command=db.test.find()' + [[ -n '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-11-04T22:21:00+0000] running db.test.find() in myApp + [[ false == \t\r\u\e ]] + mongo_command=run_mongo + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-0.some-name-rs0.pitr-30181 mongodb '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-0.some-name-rs0.pitr-30181 + local driver=mongodb + local suffix=.svc.cluster.local + grep -E -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Ky5asiv5Jv +++ mktemp ++ local LAST_ERR=/tmp/tmp.yUN2Uz8UoW ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ 
kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Ky5asiv5Jv ++ cat /tmp/tmp.yUN2Uz8UoW ++ rm /tmp/tmp.Ky5asiv5Jv /tmp/tmp.yUN2Uz8UoW ++ return 0 + local client_container=psmdb-client-696897d69b-d5dfh + local mongo_flag= + [[ myApp:myPass@some-name-rs0-0.some-name-rs0.pitr-30181 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-696897d69b-d5dfh -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-0.some-name-rs0.pitr-30181.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.oiGRQ7Egwl ++ mktemp + local LAST_ERR=/tmp/tmp.9Kn1r7c9aH + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-d5dfh -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-0.some-name-rs0.pitr-30181.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.oiGRQ7Egwl + cat /tmp/tmp.9Kn1r7c9aH + rm /tmp/tmp.oiGRQ7Egwl /tmp/tmp.9Kn1r7c9aH + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2104/e2e-tests/pitr/compare/find-2nd.json /tmp/tmp.wGkpxEBpK1/find-2nd + compare_mongo_cmd find myApp:myPass@some-name-rs0-1.some-name-rs0.pitr-30181 -2nd + local command=find + local uri=myApp:myPass@some-name-rs0-1.some-name-rs0.pitr-30181 + local postfix=-2nd + local suffix= + local database=myApp + local collection=test + local sort= + local tls=false + local 'full_command=db.test.find()' + [[ -n '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-11-04T22:21:03+0000] running db.test.find() in myApp + [[ false == \t\r\u\e ]] + mongo_command=run_mongo + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-1.some-name-rs0.pitr-30181 mongodb '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-1.some-name-rs0.pitr-30181 + local driver=mongodb + local suffix=.svc.cluster.local + grep -E -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.iiu9KvfvV3 +++ mktemp ++ local LAST_ERR=/tmp/tmp.IHfaHVwfoq ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.iiu9KvfvV3 ++ cat /tmp/tmp.IHfaHVwfoq ++ rm /tmp/tmp.iiu9KvfvV3 /tmp/tmp.IHfaHVwfoq ++ return 0 + local client_container=psmdb-client-696897d69b-d5dfh + local mongo_flag= + [[ myApp:myPass@some-name-rs0-1.some-name-rs0.pitr-30181 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-696897d69b-d5dfh -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-1.some-name-rs0.pitr-30181.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.6Cc2608W9a ++ mktemp + local LAST_ERR=/tmp/tmp.xTekq1kkFj + local exit_status=0 + local timeout=4 ++ seq 0 
2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-d5dfh -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-1.some-name-rs0.pitr-30181.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.6Cc2608W9a + cat /tmp/tmp.xTekq1kkFj + rm /tmp/tmp.6Cc2608W9a /tmp/tmp.xTekq1kkFj + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2104/e2e-tests/pitr/compare/find-2nd.json /tmp/tmp.wGkpxEBpK1/find-2nd + compare_mongo_cmd find myApp:myPass@some-name-rs0-2.some-name-rs0.pitr-30181 -2nd + local command=find + local uri=myApp:myPass@some-name-rs0-2.some-name-rs0.pitr-30181 + local postfix=-2nd + local suffix= + local database=myApp + local collection=test + local sort= + local tls=false + local 'full_command=db.test.find()' + [[ -n '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-11-04T22:21:06+0000] running db.test.find() in myApp + [[ false == \t\r\u\e ]] + mongo_command=run_mongo + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-2.some-name-rs0.pitr-30181 mongodb '' + grep -E -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-2.some-name-rs0.pitr-30181 + local driver=mongodb + local suffix=.svc.cluster.local + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.tiRvljAK9w +++ mktemp ++ local LAST_ERR=/tmp/tmp.BEpx39VJcm ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.tiRvljAK9w ++ cat /tmp/tmp.BEpx39VJcm ++ rm /tmp/tmp.tiRvljAK9w /tmp/tmp.BEpx39VJcm ++ return 0 + local client_container=psmdb-client-696897d69b-d5dfh + local mongo_flag= + [[ myApp:myPass@some-name-rs0-2.some-name-rs0.pitr-30181 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-696897d69b-d5dfh -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-2.some-name-rs0.pitr-30181.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.mPHwsYAwlk ++ mktemp + local LAST_ERR=/tmp/tmp.cPiXWRXjNw + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-d5dfh -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-2.some-name-rs0.pitr-30181.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.mPHwsYAwlk + cat /tmp/tmp.cPiXWRXjNw + rm /tmp/tmp.mPHwsYAwlk /tmp/tmp.cPiXWRXjNw + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2104/e2e-tests/pitr/compare/find-2nd.json /tmp/tmp.wGkpxEBpK1/find-2nd + sleep 2 ++ run_mongo 'new Date().toISOString()' myApp:myPass@some-name-rs0.pitr-30181 mongodb '' --quiet ++ grep -E -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach 
primary for set|Implicit session:|versions do not match' ++ local 'command=new Date().toISOString()' ++ local uri=myApp:myPass@some-name-rs0.pitr-30181 ++ local driver=mongodb ++ local suffix=.svc.cluster.local ++ tr T ' ' ++ cut -c1-19 +++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.DnZHYAyL8R ++++ mktemp +++ local LAST_ERR=/tmp/tmp.XzxdFJEgGm +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.DnZHYAyL8R +++ cat /tmp/tmp.XzxdFJEgGm +++ rm /tmp/tmp.DnZHYAyL8R /tmp/tmp.XzxdFJEgGm +++ return 0 ++ local client_container=psmdb-client-696897d69b-d5dfh ++ local mongo_flag=--quiet ++ [[ myApp:myPass@some-name-rs0.pitr-30181 == *cfg* ]] ++ replica_set=rs0 ++ kubectl_bin exec psmdb-client-696897d69b-d5dfh -- bash -c 'printf '\''new Date().toISOString()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0.pitr-30181.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 --quiet' +++ mktemp ++ local LAST_OUT=/tmp/tmp.liLTJeDKvw +++ mktemp ++ local LAST_ERR=/tmp/tmp.XI1w2E0wtL ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl exec psmdb-client-696897d69b-d5dfh -- bash -c 'printf '\''new Date().toISOString()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0.pitr-30181.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 --quiet' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.liLTJeDKvw ++ cat /tmp/tmp.XI1w2E0wtL ++ rm /tmp/tmp.liLTJeDKvw /tmp/tmp.XI1w2E0wtL ++ return 0 + time_now='2025-11-04 22:21:14' + check_recovery backup-minio-0 date '2025-11-04 22:21:14' -2nd '' some-name test2 + local backup_name=backup-minio-0 + local restore_type=date + local 'restore_date=2025-11-04 22:21:14' + local cmp_postfix=-2nd + local backupSource= + local cluster_name=some-name + local selective_collection=test2 + local restore_name=restore-backup-minio-0 + local restore_file=restore.yml + local cluster + cluster=some-name-rs0 + desc 'write more data before restore by date' + set +o xtrace ----------------------------------------------------------------------------------- write more data before restore by date ----------------------------------------------------------------------------------- + sleep 60 + run_mongo 'use myApp\n db.test.insert({ x: 100501 })' myApp:myPass@some-name-rs0.pitr-30181 + local 'command=use myApp\n db.test.insert({ x: 100501 })' + local uri=myApp:myPass@some-name-rs0.pitr-30181 + local driver=mongodb+srv + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Rymz6FBVH0 +++ mktemp ++ local LAST_ERR=/tmp/tmp.nMoknLCzld ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Rymz6FBVH0 ++ cat /tmp/tmp.nMoknLCzld ++ rm /tmp/tmp.Rymz6FBVH0 /tmp/tmp.nMoknLCzld ++ return 0 + local client_container=psmdb-client-696897d69b-d5dfh + local mongo_flag= + [[ myApp:myPass@some-name-rs0.pitr-30181 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-696897d69b-d5dfh -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100501 
})\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.pitr-30181.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.PP2V7k2l4K ++ mktemp + local LAST_ERR=/tmp/tmp.PQLGhX7urD + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-d5dfh -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100501 })\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.pitr-30181.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.PP2V7k2l4K Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-rs0-0.some-name-rs0.pitr-30181.svc.cluster.local:27017,some-name-rs0-1.some-name-rs0.pitr-30181.svc.cluster.local:27017,some-name-rs0-2.some-name-rs0.pitr-30181.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("762f5cbc-868f-46bb-908c-1c7b123f8c75") } Percona Server for MongoDB server version: v8.0.12-4 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.PQLGhX7urD + rm /tmp/tmp.PP2V7k2l4K /tmp/tmp.PQLGhX7urD + return 0 + '[' -n test2 ']' + run_mongo 'use myApp\n db.test2.drop()' myApp:myPass@some-name-rs0.pitr-30181 + local 'command=use myApp\n db.test2.drop()' + local uri=myApp:myPass@some-name-rs0.pitr-30181 + local driver=mongodb+srv + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.YsGp8sP6iC +++ mktemp ++ local LAST_ERR=/tmp/tmp.cDpOtFVMiX ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.YsGp8sP6iC ++ cat /tmp/tmp.cDpOtFVMiX ++ rm /tmp/tmp.YsGp8sP6iC /tmp/tmp.cDpOtFVMiX ++ return 0 + local client_container=psmdb-client-696897d69b-d5dfh + local mongo_flag= + [[ myApp:myPass@some-name-rs0.pitr-30181 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-696897d69b-d5dfh -- bash -c 'printf '\''use myApp\n db.test2.drop()\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.pitr-30181.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.9tS9HEUL6C ++ mktemp + local LAST_ERR=/tmp/tmp.NgxZg2iP0z + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-d5dfh -- bash -c 'printf '\''use myApp\n db.test2.drop()\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.pitr-30181.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.9tS9HEUL6C Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-rs0-0.some-name-rs0.pitr-30181.svc.cluster.local:27017,some-name-rs0-1.some-name-rs0.pitr-30181.svc.cluster.local:27017,some-name-rs0-2.some-name-rs0.pitr-30181.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("4dbe0667-033f-44d8-b58c-9b3d9f637eef") } Percona Server for MongoDB server version: v8.0.12-4 WARNING: shell and server versions do not match switched to db myApp true bye + cat /tmp/tmp.NgxZg2iP0z + rm /tmp/tmp.9tS9HEUL6C 
/tmp/tmp.NgxZg2iP0z + return 0 + desc 'waiting for chunks to be uploaded' + set +o xtrace ----------------------------------------------------------------------------------- waiting for chunks to be uploaded ----------------------------------------------------------------------------------- + sleep 120 + desc 'check restore by date' + set +o xtrace ----------------------------------------------------------------------------------- check restore by date ----------------------------------------------------------------------------------- + '[' -z '' ']' + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2104/e2e-tests/pitr/conf/restore.yml + /usr/sbin/sed -e 's/name:/name: restore-backup-minio-0/' + /usr/sbin/sed -e 's/backupName:/backupName: backup-minio-0/' + /usr/sbin/sed -e 's/clusterName:/clusterName: some-name/' + /usr/sbin/sed -e 's/pitrType:/type: date/' + '[' -z '2025-11-04 22:21:14' ']' + /usr/sbin/sed -e 's/date:/date: 2025-11-04 22:21:14/' + /usr/sbin/sed -e /backupSource/,+2d + '[' -n test2 ']' + yq eval '.spec.selective = {"namespaces": ["myApp.test"], "withUsersAndRoles": true}' + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.pcy3mHfVMI ++ mktemp + local LAST_ERR=/tmp/tmp.MOdHBmAHEn + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.pcy3mHfVMI perconaservermongodbrestore.psmdb.percona.com/restore-backup-minio-0 created + cat /tmp/tmp.MOdHBmAHEn + rm /tmp/tmp.pcy3mHfVMI /tmp/tmp.MOdHBmAHEn + return 0 + wait_restore backup-minio-0 some-name + local backup_name=backup-minio-0 + local cluster_name=some-name + local target_state=ready + local wait_cluster_consistency=1 + local wait_time=1780 + local ok_if_ready=0 + set +o xtrace Waiting for the psmdb-restore/restore-backup-minio-0 object to be created.OK Waiting psmdb-restore/restore-backup-minio-0 to reach state "ready" .OK after 0 minutes + [[ 1 -eq 1 ]] + wait_cluster_consistency some-name + local cluster_name=some-name + local wait_time=32 + retry=0 + sleep 7 + echo -n 'waiting for cluster readyness' waiting for cluster readyness++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.LRPPBcBHFZ +++ mktemp ++ local LAST_ERR=/tmp/tmp.7M8rcG2IhO ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.LRPPBcBHFZ ++ cat /tmp/tmp.7M8rcG2IhO ++ rm /tmp/tmp.LRPPBcBHFZ /tmp/tmp.7M8rcG2IhO ++ return 0 + [[ ready == \r\e\a\d\y ]] + echo .OK .OK + echo + set -o xtrace + '[' -n test2 ']' ++ collection_exists test2 ./e2e-tests/pitr/run: line 108: collection_exists: command not found + [[ '' == \t\r\u\e ]] + compare_mongo_cmd find myApp:myPass@some-name-rs0-0.some-name-rs0.pitr-30181 -2nd + local command=find + local uri=myApp:myPass@some-name-rs0-0.some-name-rs0.pitr-30181 + local postfix=-2nd + local suffix= + local database=myApp + local collection=test + local sort= + local tls=false + local 'full_command=db.test.find()' + [[ -n '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-11-04T22:24:57+0000] running db.test.find() in myApp + [[ false == \t\r\u\e ]] + mongo_command=run_mongo + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-0.some-name-rs0.pitr-30181 mongodb '' + local 'command=use myApp\n db.test.find()' + local 
uri=myApp:myPass@some-name-rs0-0.some-name-rs0.pitr-30181 + local driver=mongodb + local suffix=.svc.cluster.local + grep -E -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.z8tyaKOtWP +++ mktemp ++ local LAST_ERR=/tmp/tmp.sUKIhOXPbF ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.z8tyaKOtWP ++ cat /tmp/tmp.sUKIhOXPbF ++ rm /tmp/tmp.z8tyaKOtWP /tmp/tmp.sUKIhOXPbF ++ return 0 + local client_container=psmdb-client-696897d69b-d5dfh + local mongo_flag= + [[ myApp:myPass@some-name-rs0-0.some-name-rs0.pitr-30181 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-696897d69b-d5dfh -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-0.some-name-rs0.pitr-30181.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.2XvZFkvvK5 ++ mktemp + local LAST_ERR=/tmp/tmp.Y5HSwfiazu + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-d5dfh -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-0.some-name-rs0.pitr-30181.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.2XvZFkvvK5 + cat /tmp/tmp.Y5HSwfiazu + rm /tmp/tmp.2XvZFkvvK5 /tmp/tmp.Y5HSwfiazu + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2104/e2e-tests/pitr/compare/find-2nd.json /tmp/tmp.wGkpxEBpK1/find-2nd + compare_mongo_cmd find myApp:myPass@some-name-rs0-1.some-name-rs0.pitr-30181 -2nd + local command=find + local uri=myApp:myPass@some-name-rs0-1.some-name-rs0.pitr-30181 + local postfix=-2nd + local suffix= + local database=myApp + local collection=test + local sort= + local tls=false + local 'full_command=db.test.find()' + [[ -n '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-11-04T22:25:00+0000] running db.test.find() in myApp + [[ false == \t\r\u\e ]] + mongo_command=run_mongo + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-1.some-name-rs0.pitr-30181 mongodb '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-1.some-name-rs0.pitr-30181 + local driver=mongodb + grep -E -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' + local suffix=.svc.cluster.local + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.SVWqoLkENh +++ mktemp ++ local LAST_ERR=/tmp/tmp.6XNzWx0Vhl ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 
'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.SVWqoLkENh ++ cat /tmp/tmp.6XNzWx0Vhl ++ rm /tmp/tmp.SVWqoLkENh /tmp/tmp.6XNzWx0Vhl ++ return 0 + local client_container=psmdb-client-696897d69b-d5dfh + local mongo_flag= + [[ myApp:myPass@some-name-rs0-1.some-name-rs0.pitr-30181 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-696897d69b-d5dfh -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-1.some-name-rs0.pitr-30181.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.EIpSNP3hFR ++ mktemp + local LAST_ERR=/tmp/tmp.ekQRmuklHH + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-d5dfh -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-1.some-name-rs0.pitr-30181.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.EIpSNP3hFR + cat /tmp/tmp.ekQRmuklHH + rm /tmp/tmp.EIpSNP3hFR /tmp/tmp.ekQRmuklHH + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2104/e2e-tests/pitr/compare/find-2nd.json /tmp/tmp.wGkpxEBpK1/find-2nd + compare_mongo_cmd find myApp:myPass@some-name-rs0-2.some-name-rs0.pitr-30181 -2nd + local command=find + local uri=myApp:myPass@some-name-rs0-2.some-name-rs0.pitr-30181 + local postfix=-2nd + local suffix= + local database=myApp + local collection=test + local sort= + local tls=false + local 'full_command=db.test.find()' + [[ -n '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-11-04T22:25:03+0000] running db.test.find() in myApp + [[ false == \t\r\u\e ]] + mongo_command=run_mongo + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-2.some-name-rs0.pitr-30181 mongodb '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-2.some-name-rs0.pitr-30181 + local driver=mongodb + local suffix=.svc.cluster.local + grep -E -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' +++ mktemp ++ local LAST_OUT=/tmp/tmp.DYN2I4OPxX +++ mktemp ++ local LAST_ERR=/tmp/tmp.wuZ8RFCsXC ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.DYN2I4OPxX ++ cat /tmp/tmp.wuZ8RFCsXC ++ rm /tmp/tmp.DYN2I4OPxX /tmp/tmp.wuZ8RFCsXC ++ return 0 + local client_container=psmdb-client-696897d69b-d5dfh + local mongo_flag= + [[ myApp:myPass@some-name-rs0-2.some-name-rs0.pitr-30181 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-696897d69b-d5dfh -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-2.some-name-rs0.pitr-30181.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.zNj76M0ViJ ++ mktemp + local LAST_ERR=/tmp/tmp.b9fr1nmJ6D + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec 
psmdb-client-696897d69b-d5dfh -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-2.some-name-rs0.pitr-30181.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.zNj76M0ViJ + cat /tmp/tmp.b9fr1nmJ6D + rm /tmp/tmp.zNj76M0ViJ /tmp/tmp.b9fr1nmJ6D + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2104/e2e-tests/pitr/compare/find-2nd.json /tmp/tmp.wGkpxEBpK1/find-2nd + run_backup backup-minio 1 + local name=backup-minio + local idx=1 + desc 'run backup backup-minio-1' + set +o xtrace ----------------------------------------------------------------------------------- run backup backup-minio-1 ----------------------------------------------------------------------------------- + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2104/e2e-tests/pitr/conf/backup-minio.yml + /usr/sbin/sed -e 's/name:/name: backup-minio-1/' + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.m29Iv8SmqL ++ mktemp + local LAST_ERR=/tmp/tmp.E3woLqlAnN + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.m29Iv8SmqL perconaservermongodbbackup.psmdb.percona.com/backup-minio-1 created + cat /tmp/tmp.E3woLqlAnN + rm /tmp/tmp.m29Iv8SmqL /tmp/tmp.E3woLqlAnN + return 0 + wait_backup backup-minio-1 + local backup_name=backup-minio-1 + local target_state=ready + set +o xtrace waiting for backup-minio-1 to reach ready state.......OK + compare_latest_restorable_time some-name-rs0 backup-minio-1 + local cluster=some-name-rs0 + local backup_name=backup-minio-1 + local latest_restorable_time + local backup_time ++ get_latest_restorable_time some-name-rs0 ++ local cluster=some-name-rs0 ++ local first_timestamp ++ local second_timestamp ++ local retry=0 ++ [[ '' != '' ]] +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.riAi9P6XiX ++++ mktemp +++ local LAST_ERR=/tmp/tmp.ShXSiDQoCS +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.riAi9P6XiX +++ cat /tmp/tmp.ShXSiDQoCS +++ rm /tmp/tmp.riAi9P6XiX /tmp/tmp.ShXSiDQoCS +++ return 0 ++ first_timestamp=1762295074 ++ sleep 5 ++ [[ 1762295074 != '' ]] ++ [[ 1762295074 != \n\u\l\l ]] +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.6dDAaGavGl ++++ mktemp +++ local LAST_ERR=/tmp/tmp.zDYeXcuFFj +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.6dDAaGavGl +++ cat /tmp/tmp.zDYeXcuFFj +++ rm /tmp/tmp.6dDAaGavGl /tmp/tmp.zDYeXcuFFj +++ return 0 ++ second_timestamp=1762295074 ++ let retry+=1 ++ [[ 1 -gt 30 ]] ++ [[ 1762295074 != '' ]] ++ [[ 1762295074 != \n\u\l\l ]] ++ [[ 1762295074 == 1762295074 ]] ++ /usr/sbin/date -u -d @1762295074 +%Y-%m-%dT%H:%M:%SZ + latest_restorable_time=2025-11-04T22:24:34Z ++ get_latest_restorable_time_from_backup_object backup-minio-1 ++ local 
backup_name=backup-minio-1 ++ local latestRestorableTime ++ local retry=0 ++ [[ '' != '' ]] ++ sleep 5 +++ kubectl_bin get psmdb-backup backup-minio-1 -o 'jsonpath={.status.latestRestorableTime}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.1LGte4jHng ++++ mktemp +++ local LAST_ERR=/tmp/tmp.NAcigUiuE2 +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get psmdb-backup backup-minio-1 -o 'jsonpath={.status.latestRestorableTime}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.1LGte4jHng +++ cat /tmp/tmp.NAcigUiuE2 +++ rm /tmp/tmp.1LGte4jHng /tmp/tmp.NAcigUiuE2 +++ return 0 ++ latestRestorableTime=2025-11-04T22:24:34Z ++ let retry+=1 ++ [[ 1 -gt 30 ]] ++ [[ 2025-11-04T22:24:34Z != '' ]] ++ [[ 2025-11-04T22:24:34Z != \n\u\l\l ]] ++ echo 2025-11-04T22:24:34Z + backup_time=2025-11-04T22:24:34Z + [[ 2025-11-04T22:24:34Z != \2\0\2\5\-\1\1\-\0\4\T\2\2\:\2\4\:\3\4\Z ]] + check_recovery backup-minio-1 latest '' -3rd '' some-name + local backup_name=backup-minio-1 + local restore_type=latest + local restore_date= + local cmp_postfix=-3rd + local backupSource= + local cluster_name=some-name + local selective_collection= + local restore_name=restore-backup-minio-1 + local restore_file=restore.yml + local cluster + cluster=some-name-rs0 + desc 'write more data before restore by latest' + set +o xtrace ----------------------------------------------------------------------------------- write more data before restore by latest ----------------------------------------------------------------------------------- + sleep 60 + run_mongo 'use myApp\n db.test.insert({ x: 100501 })' myApp:myPass@some-name-rs0.pitr-30181 + local 'command=use myApp\n db.test.insert({ x: 100501 })' + local uri=myApp:myPass@some-name-rs0.pitr-30181 + local driver=mongodb+srv + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.TftR6517y3 +++ mktemp ++ local LAST_ERR=/tmp/tmp.yAB7zmekxz ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.TftR6517y3 ++ cat /tmp/tmp.yAB7zmekxz ++ rm /tmp/tmp.TftR6517y3 /tmp/tmp.yAB7zmekxz ++ return 0 + local client_container=psmdb-client-696897d69b-d5dfh + local mongo_flag= + [[ myApp:myPass@some-name-rs0.pitr-30181 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-696897d69b-d5dfh -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100501 })\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.pitr-30181.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.Zjhcr9Poo1 ++ mktemp + local LAST_ERR=/tmp/tmp.XHdYpwDHGo + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-d5dfh -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100501 })\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.pitr-30181.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Zjhcr9Poo1 Percona Server for MongoDB shell version v4.4.29-28 connecting to: 
mongodb://some-name-rs0-0.some-name-rs0.pitr-30181.svc.cluster.local:27017,some-name-rs0-1.some-name-rs0.pitr-30181.svc.cluster.local:27017,some-name-rs0-2.some-name-rs0.pitr-30181.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("cad562e6-8eb2-45da-a630-4a4fd0bf812b") } Percona Server for MongoDB server version: v8.0.12-4 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.XHdYpwDHGo + rm /tmp/tmp.Zjhcr9Poo1 /tmp/tmp.XHdYpwDHGo + return 0 + '[' -n '' ']' + desc 'waiting for chunks to be uploaded' + set +o xtrace ----------------------------------------------------------------------------------- waiting for chunks to be uploaded ----------------------------------------------------------------------------------- + sleep 120 + desc 'check restore by latest' + set +o xtrace ----------------------------------------------------------------------------------- check restore by latest ----------------------------------------------------------------------------------- + '[' -z '' ']' + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2104/e2e-tests/pitr/conf/restore.yml + /usr/sbin/sed -e 's/name:/name: restore-backup-minio-1/' + /usr/sbin/sed -e 's/backupName:/backupName: backup-minio-1/' + /usr/sbin/sed -e 's/clusterName:/clusterName: some-name/' + /usr/sbin/sed -e 's/pitrType:/type: latest/' + '[' -z '' ']' + /usr/sbin/sed -e /date:/d + '[' -n '' ']' + /usr/sbin/sed -e /backupSource/,+2d + kubectl_bin apply -f - + yq ++ mktemp + local LAST_OUT=/tmp/tmp.2yyoJ1erDx ++ mktemp + local LAST_ERR=/tmp/tmp.cUroky12m4 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.2yyoJ1erDx perconaservermongodbrestore.psmdb.percona.com/restore-backup-minio-1 created + cat /tmp/tmp.cUroky12m4 + rm /tmp/tmp.2yyoJ1erDx /tmp/tmp.cUroky12m4 + return 0 + wait_restore backup-minio-1 some-name + local backup_name=backup-minio-1 + local cluster_name=some-name + local target_state=ready + local wait_cluster_consistency=1 + local wait_time=1780 + local ok_if_ready=0 + set +o xtrace Waiting for the psmdb-restore/restore-backup-minio-1 object to be created.OK Waiting psmdb-restore/restore-backup-minio-1 to reach state "ready" .OK after 0 minutes + [[ 1 -eq 1 ]] + wait_cluster_consistency some-name + local cluster_name=some-name + local wait_time=32 + retry=0 + sleep 7 + echo -n 'waiting for cluster readyness' waiting for cluster readyness++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.HadT2drmWw +++ mktemp ++ local LAST_ERR=/tmp/tmp.2WgAW1an9r ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.HadT2drmWw ++ cat /tmp/tmp.2WgAW1an9r ++ rm /tmp/tmp.HadT2drmWw /tmp/tmp.2WgAW1an9r ++ return 0 + [[ ready == \r\e\a\d\y ]] + echo .OK .OK + echo + set -o xtrace + '[' -n '' ']' + compare_mongo_cmd find myApp:myPass@some-name-rs0-0.some-name-rs0.pitr-30181 -3rd + local command=find + local uri=myApp:myPass@some-name-rs0-0.some-name-rs0.pitr-30181 + local postfix=-3rd + local suffix= + local database=myApp + local collection=test + local sort= + local tls=false + local 'full_command=db.test.find()' + [[ -n '' ]] + log 'running 
db.test.find() in myApp' + set +o xtrace [2025-11-04T22:29:05+0000] running db.test.find() in myApp + [[ false == \t\r\u\e ]] + mongo_command=run_mongo + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-0.some-name-rs0.pitr-30181 mongodb '' + local 'command=use myApp\n db.test.find()' + grep -E -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' + local uri=myApp:myPass@some-name-rs0-0.some-name-rs0.pitr-30181 + local driver=mongodb + local suffix=.svc.cluster.local + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.0bhGzNQQWa +++ mktemp ++ local LAST_ERR=/tmp/tmp.BgtH4gRGe5 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.0bhGzNQQWa ++ cat /tmp/tmp.BgtH4gRGe5 ++ rm /tmp/tmp.0bhGzNQQWa /tmp/tmp.BgtH4gRGe5 ++ return 0 + local client_container=psmdb-client-696897d69b-d5dfh + local mongo_flag= + [[ myApp:myPass@some-name-rs0-0.some-name-rs0.pitr-30181 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-696897d69b-d5dfh -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-0.some-name-rs0.pitr-30181.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.HzxMXm3gE5 ++ mktemp + local LAST_ERR=/tmp/tmp.GNJ8dULuZs + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-d5dfh -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-0.some-name-rs0.pitr-30181.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.HzxMXm3gE5 + cat /tmp/tmp.GNJ8dULuZs + rm /tmp/tmp.HzxMXm3gE5 /tmp/tmp.GNJ8dULuZs + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2104/e2e-tests/pitr/compare/find-3rd.json /tmp/tmp.wGkpxEBpK1/find-3rd + compare_mongo_cmd find myApp:myPass@some-name-rs0-1.some-name-rs0.pitr-30181 -3rd + local command=find + local uri=myApp:myPass@some-name-rs0-1.some-name-rs0.pitr-30181 + local postfix=-3rd + local suffix= + local database=myApp + local collection=test + local sort= + local tls=false + local 'full_command=db.test.find()' + [[ -n '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-11-04T22:29:08+0000] running db.test.find() in myApp + [[ false == \t\r\u\e ]] + mongo_command=run_mongo + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-1.some-name-rs0.pitr-30181 mongodb '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-1.some-name-rs0.pitr-30181 + local driver=mongodb + local suffix=.svc.cluster.local + grep -E -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods 
--selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.mybdNdyjMl +++ mktemp ++ local LAST_ERR=/tmp/tmp.HGa9ftZnGU ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.mybdNdyjMl ++ cat /tmp/tmp.HGa9ftZnGU ++ rm /tmp/tmp.mybdNdyjMl /tmp/tmp.HGa9ftZnGU ++ return 0 + local client_container=psmdb-client-696897d69b-d5dfh + local mongo_flag= + [[ myApp:myPass@some-name-rs0-1.some-name-rs0.pitr-30181 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-696897d69b-d5dfh -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-1.some-name-rs0.pitr-30181.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.hSDEAiLfMW ++ mktemp + local LAST_ERR=/tmp/tmp.8PI1ZQgaED + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-d5dfh -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-1.some-name-rs0.pitr-30181.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.hSDEAiLfMW + cat /tmp/tmp.8PI1ZQgaED + rm /tmp/tmp.hSDEAiLfMW /tmp/tmp.8PI1ZQgaED + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2104/e2e-tests/pitr/compare/find-3rd.json /tmp/tmp.wGkpxEBpK1/find-3rd + compare_mongo_cmd find myApp:myPass@some-name-rs0-2.some-name-rs0.pitr-30181 -3rd + local command=find + local uri=myApp:myPass@some-name-rs0-2.some-name-rs0.pitr-30181 + local postfix=-3rd + local suffix= + local database=myApp + local collection=test + local sort= + local tls=false + local 'full_command=db.test.find()' + [[ -n '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-11-04T22:29:10+0000] running db.test.find() in myApp + [[ false == \t\r\u\e ]] + mongo_command=run_mongo + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-2.some-name-rs0.pitr-30181 mongodb '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-2.some-name-rs0.pitr-30181 + local driver=mongodb + local suffix=.svc.cluster.local + grep -E -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.hhgSK1WAgN +++ mktemp ++ local LAST_ERR=/tmp/tmp.6KM34IPhDO ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.hhgSK1WAgN ++ cat /tmp/tmp.6KM34IPhDO ++ rm /tmp/tmp.hhgSK1WAgN /tmp/tmp.6KM34IPhDO ++ return 0 + local client_container=psmdb-client-696897d69b-d5dfh + local mongo_flag= + [[ myApp:myPass@some-name-rs0-2.some-name-rs0.pitr-30181 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-696897d69b-d5dfh -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo 
mongodb://myApp:myPass@some-name-rs0-2.some-name-rs0.pitr-30181.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.cVMSLurFEC ++ mktemp + local LAST_ERR=/tmp/tmp.qo93vsD6if + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-d5dfh -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-2.some-name-rs0.pitr-30181.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.cVMSLurFEC + cat /tmp/tmp.qo93vsD6if + rm /tmp/tmp.cVMSLurFEC /tmp/tmp.qo93vsD6if + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2104/e2e-tests/pitr/compare/find-3rd.json /tmp/tmp.wGkpxEBpK1/find-3rd + desc 'restore pitr using backupSource' + set +o xtrace ----------------------------------------------------------------------------------- restore pitr using backupSource ----------------------------------------------------------------------------------- + reset_collection + desc 'reset data' + set +o xtrace ----------------------------------------------------------------------------------- reset data ----------------------------------------------------------------------------------- + run_mongo 'use myApp\n db.test.remove({})' myApp:myPass@some-name-rs0.pitr-30181 + local 'command=use myApp\n db.test.remove({})' + local uri=myApp:myPass@some-name-rs0.pitr-30181 + local driver=mongodb+srv + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.xY6qMQ1u7o +++ mktemp ++ local LAST_ERR=/tmp/tmp.KIFsrEaus6 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.xY6qMQ1u7o ++ cat /tmp/tmp.KIFsrEaus6 ++ rm /tmp/tmp.xY6qMQ1u7o /tmp/tmp.KIFsrEaus6 ++ return 0 + local client_container=psmdb-client-696897d69b-d5dfh + local mongo_flag= + [[ myApp:myPass@some-name-rs0.pitr-30181 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-696897d69b-d5dfh -- bash -c 'printf '\''use myApp\n db.test.remove({})\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.pitr-30181.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.08VV9TDWYQ ++ mktemp + local LAST_ERR=/tmp/tmp.AtvWwNERzS + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-d5dfh -- bash -c 'printf '\''use myApp\n db.test.remove({})\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.pitr-30181.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.08VV9TDWYQ Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-rs0-0.some-name-rs0.pitr-30181.svc.cluster.local:27017,some-name-rs0-1.some-name-rs0.pitr-30181.svc.cluster.local:27017,some-name-rs0-2.some-name-rs0.pitr-30181.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("6a1d7e7d-34c3-4ecc-b7c1-c12795e1883e") } Percona Server for MongoDB server version: v8.0.12-4 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nRemoved" : 3 }) bye + cat 
/tmp/tmp.AtvWwNERzS + rm /tmp/tmp.08VV9TDWYQ /tmp/tmp.AtvWwNERzS + return 0 + sleep 2 + write_document + local cmp_postfix= + local cluster_name=some-name-rs0 + desc 'write initial data, read from all' + set +o xtrace ----------------------------------------------------------------------------------- write initial data, read from all ----------------------------------------------------------------------------------- + run_mongo 'use myApp\n db.test.insert({ x: 100500 })' myApp:myPass@some-name-rs0.pitr-30181 + local 'command=use myApp\n db.test.insert({ x: 100500 })' + local uri=myApp:myPass@some-name-rs0.pitr-30181 + local driver=mongodb+srv + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.tp5heoKkhb +++ mktemp ++ local LAST_ERR=/tmp/tmp.26zyHMAXdk ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.tp5heoKkhb ++ cat /tmp/tmp.26zyHMAXdk ++ rm /tmp/tmp.tp5heoKkhb /tmp/tmp.26zyHMAXdk ++ return 0 + local client_container=psmdb-client-696897d69b-d5dfh + local mongo_flag= + [[ myApp:myPass@some-name-rs0.pitr-30181 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-696897d69b-d5dfh -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.pitr-30181.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.BsQxvl2I6c ++ mktemp + local LAST_ERR=/tmp/tmp.yDfeGIxTdA + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-d5dfh -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.pitr-30181.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.BsQxvl2I6c Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-rs0-0.some-name-rs0.pitr-30181.svc.cluster.local:27017,some-name-rs0-1.some-name-rs0.pitr-30181.svc.cluster.local:27017,some-name-rs0-2.some-name-rs0.pitr-30181.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("c68988e6-66d0-4771-bb14-63360ef4e144") } Percona Server for MongoDB server version: v8.0.12-4 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.yDfeGIxTdA + rm /tmp/tmp.BsQxvl2I6c /tmp/tmp.yDfeGIxTdA + return 0 + minikube_sleep + sleep_time=10 + [[ '' == 1 ]] + compare_mongo_cmd find myApp:myPass@some-name-rs0-0.some-name-rs0.pitr-30181 '' + local command=find + local uri=myApp:myPass@some-name-rs0-0.some-name-rs0.pitr-30181 + local postfix= + local suffix= + local database=myApp + local collection=test + local sort= + local tls=false + local 'full_command=db.test.find()' + [[ -n '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-11-04T22:29:19+0000] running db.test.find() in myApp + [[ false == \t\r\u\e ]] + mongo_command=run_mongo + grep -E -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for 
the timer service' + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-0.some-name-rs0.pitr-30181 mongodb '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-0.some-name-rs0.pitr-30181 + local driver=mongodb + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' +++ mktemp ++ local LAST_OUT=/tmp/tmp.GeLJGnVRx4 +++ mktemp ++ local LAST_ERR=/tmp/tmp.s8YElyinAa ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.GeLJGnVRx4 ++ cat /tmp/tmp.s8YElyinAa ++ rm /tmp/tmp.GeLJGnVRx4 /tmp/tmp.s8YElyinAa ++ return 0 + local client_container=psmdb-client-696897d69b-d5dfh + local mongo_flag= + [[ myApp:myPass@some-name-rs0-0.some-name-rs0.pitr-30181 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-696897d69b-d5dfh -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-0.some-name-rs0.pitr-30181.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.xf07FDq3qT ++ mktemp + local LAST_ERR=/tmp/tmp.nHQcRu2No1 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-d5dfh -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-0.some-name-rs0.pitr-30181.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.xf07FDq3qT + cat /tmp/tmp.nHQcRu2No1 + rm /tmp/tmp.xf07FDq3qT /tmp/tmp.nHQcRu2No1 + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2104/e2e-tests/pitr/compare/find.json /tmp/tmp.wGkpxEBpK1/find + compare_mongo_cmd find myApp:myPass@some-name-rs0-1.some-name-rs0.pitr-30181 '' + local command=find + local uri=myApp:myPass@some-name-rs0-1.some-name-rs0.pitr-30181 + local postfix= + local suffix= + local database=myApp + local collection=test + local sort= + local tls=false + local 'full_command=db.test.find()' + [[ -n '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-11-04T22:29:21+0000] running db.test.find() in myApp + [[ false == \t\r\u\e ]] + mongo_command=run_mongo + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-1.some-name-rs0.pitr-30181 mongodb '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-1.some-name-rs0.pitr-30181 + local driver=mongodb + local suffix=.svc.cluster.local + grep -E -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.3I8wyJ0CaD +++ mktemp ++ local LAST_ERR=/tmp/tmp.EUXOKvUrYA ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat 
/tmp/tmp.3I8wyJ0CaD ++ cat /tmp/tmp.EUXOKvUrYA ++ rm /tmp/tmp.3I8wyJ0CaD /tmp/tmp.EUXOKvUrYA ++ return 0 + local client_container=psmdb-client-696897d69b-d5dfh + local mongo_flag= + [[ myApp:myPass@some-name-rs0-1.some-name-rs0.pitr-30181 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-696897d69b-d5dfh -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-1.some-name-rs0.pitr-30181.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.c6YFJtwa5u ++ mktemp + local LAST_ERR=/tmp/tmp.z2nTErKgak + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-d5dfh -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-1.some-name-rs0.pitr-30181.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.c6YFJtwa5u + cat /tmp/tmp.z2nTErKgak + rm /tmp/tmp.c6YFJtwa5u /tmp/tmp.z2nTErKgak + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2104/e2e-tests/pitr/compare/find.json /tmp/tmp.wGkpxEBpK1/find + compare_mongo_cmd find myApp:myPass@some-name-rs0-2.some-name-rs0.pitr-30181 '' + local command=find + local uri=myApp:myPass@some-name-rs0-2.some-name-rs0.pitr-30181 + local postfix= + local suffix= + local database=myApp + local collection=test + local sort= + local tls=false + local 'full_command=db.test.find()' + [[ -n '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-11-04T22:29:24+0000] running db.test.find() in myApp + [[ false == \t\r\u\e ]] + mongo_command=run_mongo + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-2.some-name-rs0.pitr-30181 mongodb '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-2.some-name-rs0.pitr-30181 + local driver=mongodb + local suffix=.svc.cluster.local + grep -E -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.lUEyVpZbt2 +++ mktemp ++ local LAST_ERR=/tmp/tmp.BA2U2ZlWfy ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.lUEyVpZbt2 ++ cat /tmp/tmp.BA2U2ZlWfy ++ rm /tmp/tmp.lUEyVpZbt2 /tmp/tmp.BA2U2ZlWfy ++ return 0 + local client_container=psmdb-client-696897d69b-d5dfh + local mongo_flag= + [[ myApp:myPass@some-name-rs0-2.some-name-rs0.pitr-30181 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-696897d69b-d5dfh -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-2.some-name-rs0.pitr-30181.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.f5ZlxEcU1p ++ mktemp + local LAST_ERR=/tmp/tmp.e0VzVKZT1k + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-d5dfh -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo 
mongodb://myApp:myPass@some-name-rs0-2.some-name-rs0.pitr-30181.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.f5ZlxEcU1p + cat /tmp/tmp.e0VzVKZT1k + rm /tmp/tmp.f5ZlxEcU1p /tmp/tmp.e0VzVKZT1k + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2104/e2e-tests/pitr/compare/find.json /tmp/tmp.wGkpxEBpK1/find + run_backup backup-minio 2 + local name=backup-minio + local idx=2 + desc 'run backup backup-minio-2' + set +o xtrace ----------------------------------------------------------------------------------- run backup backup-minio-2 ----------------------------------------------------------------------------------- + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2104/e2e-tests/pitr/conf/backup-minio.yml + kubectl_bin apply -f - + /usr/sbin/sed -e 's/name:/name: backup-minio-2/' ++ mktemp + local LAST_OUT=/tmp/tmp.GK5MSLDHnG ++ mktemp + local LAST_ERR=/tmp/tmp.PKv1qpwH4z + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.GK5MSLDHnG perconaservermongodbbackup.psmdb.percona.com/backup-minio-2 created + cat /tmp/tmp.PKv1qpwH4z + rm /tmp/tmp.GK5MSLDHnG /tmp/tmp.PKv1qpwH4z + return 0 + wait_backup backup-minio-2 + local backup_name=backup-minio-2 + local target_state=ready + set +o xtrace waiting for backup-minio-2 to reach ready state......OK + compare_latest_restorable_time some-name-rs0 backup-minio-2 + local cluster=some-name-rs0 + local backup_name=backup-minio-2 + local latest_restorable_time + local backup_time ++ get_latest_restorable_time some-name-rs0 ++ local cluster=some-name-rs0 ++ local first_timestamp ++ local second_timestamp ++ local retry=0 ++ [[ '' != '' ]] +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.G9y4CRPeb3 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.pG7qF8Al9U +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.G9y4CRPeb3 +++ cat /tmp/tmp.pG7qF8Al9U +++ rm /tmp/tmp.G9y4CRPeb3 /tmp/tmp.pG7qF8Al9U +++ return 0 ++ first_timestamp=1762295325 ++ sleep 5 ++ [[ 1762295325 != '' ]] ++ [[ 1762295325 != \n\u\l\l ]] +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.ZPN2Y0nkHA ++++ mktemp +++ local LAST_ERR=/tmp/tmp.VdIHCNTBrd +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.ZPN2Y0nkHA +++ cat /tmp/tmp.VdIHCNTBrd +++ rm /tmp/tmp.ZPN2Y0nkHA /tmp/tmp.VdIHCNTBrd +++ return 0 ++ second_timestamp=1762295325 ++ let retry+=1 ++ [[ 1 -gt 30 ]] ++ [[ 1762295325 != '' ]] ++ [[ 1762295325 != \n\u\l\l ]] ++ [[ 1762295325 == 1762295325 ]] ++ /usr/sbin/date -u -d @1762295325 +%Y-%m-%dT%H:%M:%SZ + latest_restorable_time=2025-11-04T22:28:45Z ++ get_latest_restorable_time_from_backup_object backup-minio-2 ++ local backup_name=backup-minio-2 ++ local latestRestorableTime ++ local retry=0 ++ [[ '' != '' ]] ++ sleep 5 +++ 
kubectl_bin get psmdb-backup backup-minio-2 -o 'jsonpath={.status.latestRestorableTime}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.qCu9iVpoaT ++++ mktemp +++ local LAST_ERR=/tmp/tmp.G1lyx8YGl7 +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get psmdb-backup backup-minio-2 -o 'jsonpath={.status.latestRestorableTime}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.qCu9iVpoaT +++ cat /tmp/tmp.G1lyx8YGl7 +++ rm /tmp/tmp.qCu9iVpoaT /tmp/tmp.G1lyx8YGl7 +++ return 0 ++ latestRestorableTime=2025-11-04T22:28:45Z ++ let retry+=1 ++ [[ 1 -gt 30 ]] ++ [[ 2025-11-04T22:28:45Z != '' ]] ++ [[ 2025-11-04T22:28:45Z != \n\u\l\l ]] ++ echo 2025-11-04T22:28:45Z + backup_time=2025-11-04T22:28:45Z + [[ 2025-11-04T22:28:45Z != \2\0\2\5\-\1\1\-\0\4\T\2\2\:\2\8\:\4\5\Z ]] ++ run_mongo 'new Date().toISOString()' myApp:myPass@some-name-rs0.pitr-30181 mongodb '' --quiet ++ cut -c1-19 ++ grep -E -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match' ++ local 'command=new Date().toISOString()' ++ local uri=myApp:myPass@some-name-rs0.pitr-30181 ++ local driver=mongodb ++ local suffix=.svc.cluster.local ++ tr T ' ' +++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.haTNzlvQcp ++++ mktemp +++ local LAST_ERR=/tmp/tmp.2N3oAbKywf +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.haTNzlvQcp +++ cat /tmp/tmp.2N3oAbKywf +++ rm /tmp/tmp.haTNzlvQcp /tmp/tmp.2N3oAbKywf +++ return 0 ++ local client_container=psmdb-client-696897d69b-d5dfh ++ local mongo_flag=--quiet ++ [[ myApp:myPass@some-name-rs0.pitr-30181 == *cfg* ]] ++ replica_set=rs0 ++ kubectl_bin exec psmdb-client-696897d69b-d5dfh -- bash -c 'printf '\''new Date().toISOString()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0.pitr-30181.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 --quiet' +++ mktemp ++ local LAST_OUT=/tmp/tmp.em8FuLVzsk +++ mktemp ++ local LAST_ERR=/tmp/tmp.ewNkDSWitn ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl exec psmdb-client-696897d69b-d5dfh -- bash -c 'printf '\''new Date().toISOString()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0.pitr-30181.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 --quiet' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.em8FuLVzsk ++ cat /tmp/tmp.ewNkDSWitn ++ rm /tmp/tmp.em8FuLVzsk /tmp/tmp.ewNkDSWitn ++ return 0 + time_now='2025-11-04 22:29:59' + check_recovery backup-minio-2 date '2025-11-04 22:29:59' '' backupSource some-name + local backup_name=backup-minio-2 + local restore_type=date + local 'restore_date=2025-11-04 22:29:59' + local cmp_postfix= + local backupSource=backupSource + local cluster_name=some-name + local selective_collection= + local restore_name=restore-backup-minio-2 + local restore_file=restore.yml + local cluster + cluster=some-name-rs0 + desc 'write more data before restore by date' + set +o xtrace ----------------------------------------------------------------------------------- write more data before restore by date ----------------------------------------------------------------------------------- + sleep 60 
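The block above establishes the restore target: compare_latest_restorable_time polls `pbm status` until the newest PITR chunk's end timestamp stops advancing, cross-checks that value against the backup object's status.latestRestorableTime, and then time_now is read from the server clock for the restore-by-date request. A minimal bash sketch of that stabilization check, reconstructed from the trace (the helper name, pod, backup name, 5s interval, and 30-retry cap are the test's own; error wording is illustrative):

    # Reconstruction of get_latest_restorable_time: poll PBM until the newest
    # PITR chunk's end timestamp is stable across two samples, then print it
    # in the same format as the backup object's status field.
    get_latest_restorable_time() {
        local pod=$1
        local prev="" cur="" retry=0
        while true; do
            cur=$(kubectl exec "$pod" -c backup-agent -- pbm status -o json \
                | jq '.backups.pitrChunks.pitrChunks | last | .range.end')
            if [[ -n $cur && $cur != null && $cur == "$prev" ]]; then
                break
            fi
            prev=$cur
            let retry+=1
            [[ $retry -gt 30 ]] && { echo "PITR chunks did not stabilize" >&2; return 1; }
            sleep 5
        done
        /usr/sbin/date -u -d "@$cur" +%Y-%m-%dT%H:%M:%SZ
    }

    # Cross-check against the backup object, as the trace does:
    latest=$(get_latest_restorable_time some-name-rs0-0)
    from_object=$(kubectl get psmdb-backup backup-minio-2 \
        -o 'jsonpath={.status.latestRestorableTime}')
    [[ $latest == "$from_object" ]] || echo "latestRestorableTime mismatch: $latest vs $from_object"

In this run both sides agreed on 2025-11-04T22:28:45Z before the date-based restore was requested; the trace continues below with the pre-restore write.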
+ run_mongo 'use myApp\n db.test.insert({ x: 100501 })' myApp:myPass@some-name-rs0.pitr-30181 + local 'command=use myApp\n db.test.insert({ x: 100501 })' + local uri=myApp:myPass@some-name-rs0.pitr-30181 + local driver=mongodb+srv + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.5cTNCDKTTO +++ mktemp ++ local LAST_ERR=/tmp/tmp.LPZ1rSq3SZ ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.5cTNCDKTTO ++ cat /tmp/tmp.LPZ1rSq3SZ ++ rm /tmp/tmp.5cTNCDKTTO /tmp/tmp.LPZ1rSq3SZ ++ return 0 + local client_container=psmdb-client-696897d69b-d5dfh + local mongo_flag= + [[ myApp:myPass@some-name-rs0.pitr-30181 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-696897d69b-d5dfh -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100501 })\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.pitr-30181.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.U5Wq80aBi7 ++ mktemp + local LAST_ERR=/tmp/tmp.GVvxzzdEzL + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-d5dfh -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100501 })\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.pitr-30181.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.U5Wq80aBi7 Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-rs0-0.some-name-rs0.pitr-30181.svc.cluster.local:27017,some-name-rs0-1.some-name-rs0.pitr-30181.svc.cluster.local:27017,some-name-rs0-2.some-name-rs0.pitr-30181.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("e7269294-3c20-4baa-b0cf-b1f8dd3b134e") } Percona Server for MongoDB server version: v8.0.12-4 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.GVvxzzdEzL + rm /tmp/tmp.U5Wq80aBi7 /tmp/tmp.GVvxzzdEzL + return 0 + '[' -n '' ']' + desc 'waiting for chunks to be uploaded' + set +o xtrace ----------------------------------------------------------------------------------- waiting for chunks to be uploaded ----------------------------------------------------------------------------------- + sleep 120 + desc 'check restore by date' + set +o xtrace ----------------------------------------------------------------------------------- check restore by date ----------------------------------------------------------------------------------- + '[' -z backupSource ']' ++ get_backup_dest backup-minio-2 ++ local backup_name=backup-minio-2 ++ kubectl_bin get psmdb-backup backup-minio-2 -o 'jsonpath={.status.destination}' ++ sed -e 's/.json$//' ++ sed 's|s3://||' ++ sed 's|azure://||' +++ mktemp ++ sed 's|gs://||' ++ local LAST_OUT=/tmp/tmp.w8du8trunM +++ mktemp ++ local LAST_ERR=/tmp/tmp.tKp2D3JiTl ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb-backup backup-minio-2 -o 'jsonpath={.status.destination}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.w8du8trunM ++ cat /tmp/tmp.tKp2D3JiTl ++ rm /tmp/tmp.w8du8trunM 
/tmp/tmp.tKp2D3JiTl ++ return 0 + backup_dest=operator-testing/pitr-prefix-1/2025-11-04T22:29:30Z + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2104/e2e-tests/pitr/conf/restore.yml + /usr/sbin/sed -e 's/name:/name: restore-backup-minio-2/' + /usr/sbin/sed -e /backupName/d + /usr/sbin/sed -e 's/clusterName:/clusterName: some-name/' + /usr/sbin/sed -e 's/pitrType:/type: date/' + '[' -z '2025-11-04 22:29:59' ']' + /usr/sbin/sed -e 's/date:/date: 2025-11-04 22:29:59/' + /usr/sbin/sed -e 's|BACKUP-NAME|operator-testing/pitr-prefix-1/2025-11-04T22:29:30Z|' + '[' -n '' ']' + yq + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.JyptYquho6 ++ mktemp + local LAST_ERR=/tmp/tmp.4e6ZkqaYca + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.JyptYquho6 perconaservermongodbrestore.psmdb.percona.com/restore-backup-minio-2 created + cat /tmp/tmp.4e6ZkqaYca + rm /tmp/tmp.JyptYquho6 /tmp/tmp.4e6ZkqaYca + return 0 + wait_restore backup-minio-2 some-name + local backup_name=backup-minio-2 + local cluster_name=some-name + local target_state=ready + local wait_cluster_consistency=1 + local wait_time=1780 + local ok_if_ready=0 + set +o xtrace Waiting for the psmdb-restore/restore-backup-minio-2 object to be created.OK Waiting psmdb-restore/restore-backup-minio-2 to reach state "ready" .OK after 0 minutes + [[ 1 -eq 1 ]] + wait_cluster_consistency some-name + local cluster_name=some-name + local wait_time=32 + retry=0 + sleep 7 + echo -n 'waiting for cluster readyness' waiting for cluster readyness++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.QHtlEUQWJ8 +++ mktemp ++ local LAST_ERR=/tmp/tmp.8rVOiDAkfw ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.QHtlEUQWJ8 ++ cat /tmp/tmp.8rVOiDAkfw ++ rm /tmp/tmp.QHtlEUQWJ8 /tmp/tmp.8rVOiDAkfw ++ return 0 + [[ ready == \r\e\a\d\y ]] + echo .OK .OK + echo + set -o xtrace + '[' -n '' ']' + compare_mongo_cmd find myApp:myPass@some-name-rs0-0.some-name-rs0.pitr-30181 '' + local command=find + local uri=myApp:myPass@some-name-rs0-0.some-name-rs0.pitr-30181 + local postfix= + local suffix= + local database=myApp + local collection=test + local sort= + local tls=false + local 'full_command=db.test.find()' + [[ -n '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-11-04T22:33:30+0000] running db.test.find() in myApp + [[ false == \t\r\u\e ]] + mongo_command=run_mongo + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-0.some-name-rs0.pitr-30181 mongodb '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-0.some-name-rs0.pitr-30181 + grep -E -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' + local driver=mongodb + local suffix=.svc.cluster.local + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.sO4WgtV6tf +++ mktemp ++ local LAST_ERR=/tmp/tmp.Ma2LKkbf8p ++ local exit_status=0 ++ local timeout=4 +++ seq 
0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.sO4WgtV6tf ++ cat /tmp/tmp.Ma2LKkbf8p ++ rm /tmp/tmp.sO4WgtV6tf /tmp/tmp.Ma2LKkbf8p ++ return 0 + local client_container=psmdb-client-696897d69b-d5dfh + local mongo_flag= + [[ myApp:myPass@some-name-rs0-0.some-name-rs0.pitr-30181 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-696897d69b-d5dfh -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-0.some-name-rs0.pitr-30181.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.5MNd0yvx3A ++ mktemp + local LAST_ERR=/tmp/tmp.9W03ievvLV + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-d5dfh -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-0.some-name-rs0.pitr-30181.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.5MNd0yvx3A + cat /tmp/tmp.9W03ievvLV + rm /tmp/tmp.5MNd0yvx3A /tmp/tmp.9W03ievvLV + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2104/e2e-tests/pitr/compare/find.json /tmp/tmp.wGkpxEBpK1/find + compare_mongo_cmd find myApp:myPass@some-name-rs0-1.some-name-rs0.pitr-30181 '' + local command=find + local uri=myApp:myPass@some-name-rs0-1.some-name-rs0.pitr-30181 + local postfix= + local suffix= + local database=myApp + local collection=test + local sort= + local tls=false + local 'full_command=db.test.find()' + [[ -n '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-11-04T22:33:33+0000] running db.test.find() in myApp + [[ false == \t\r\u\e ]] + mongo_command=run_mongo + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-1.some-name-rs0.pitr-30181 mongodb '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-1.some-name-rs0.pitr-30181 + local driver=mongodb + local suffix=.svc.cluster.local + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' + grep -E -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Gy3JHcLHDt +++ mktemp ++ local LAST_ERR=/tmp/tmp.KXxQrhIj0n ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Gy3JHcLHDt ++ cat /tmp/tmp.KXxQrhIj0n ++ rm /tmp/tmp.Gy3JHcLHDt /tmp/tmp.KXxQrhIj0n ++ return 0 + local client_container=psmdb-client-696897d69b-d5dfh + local mongo_flag= + [[ myApp:myPass@some-name-rs0-1.some-name-rs0.pitr-30181 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-696897d69b-d5dfh -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-1.some-name-rs0.pitr-30181.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.c5AdJnbzTq ++ mktemp + local LAST_ERR=/tmp/tmp.7OtV5tlL4p + local exit_status=0 + 
local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-d5dfh -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-1.some-name-rs0.pitr-30181.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.c5AdJnbzTq + cat /tmp/tmp.7OtV5tlL4p + rm /tmp/tmp.c5AdJnbzTq /tmp/tmp.7OtV5tlL4p + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2104/e2e-tests/pitr/compare/find.json /tmp/tmp.wGkpxEBpK1/find + compare_mongo_cmd find myApp:myPass@some-name-rs0-2.some-name-rs0.pitr-30181 '' + local command=find + local uri=myApp:myPass@some-name-rs0-2.some-name-rs0.pitr-30181 + local postfix= + local suffix= + local database=myApp + local collection=test + local sort= + local tls=false + local 'full_command=db.test.find()' + [[ -n '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-11-04T22:33:35+0000] running db.test.find() in myApp + [[ false == \t\r\u\e ]] + mongo_command=run_mongo + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-2.some-name-rs0.pitr-30181 mongodb '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-2.some-name-rs0.pitr-30181 + local driver=mongodb + local suffix=.svc.cluster.local + grep -E -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.o2YoSIrNB9 +++ mktemp ++ local LAST_ERR=/tmp/tmp.p63hRAYSuY ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.o2YoSIrNB9 ++ cat /tmp/tmp.p63hRAYSuY ++ rm /tmp/tmp.o2YoSIrNB9 /tmp/tmp.p63hRAYSuY ++ return 0 + local client_container=psmdb-client-696897d69b-d5dfh + local mongo_flag= + [[ myApp:myPass@some-name-rs0-2.some-name-rs0.pitr-30181 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-696897d69b-d5dfh -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-2.some-name-rs0.pitr-30181.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.aJdXr8TyCF ++ mktemp + local LAST_ERR=/tmp/tmp.g48BLeEeIi + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-d5dfh -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-2.some-name-rs0.pitr-30181.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.aJdXr8TyCF + cat /tmp/tmp.g48BLeEeIi + rm /tmp/tmp.aJdXr8TyCF /tmp/tmp.g48BLeEeIi + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2104/e2e-tests/pitr/compare/find.json /tmp/tmp.wGkpxEBpK1/find + run_backup backup-minio 3 + local name=backup-minio + local idx=3 + desc 'run backup backup-minio-3' + set +o xtrace ----------------------------------------------------------------------------------- run backup 
backup-minio-3 ----------------------------------------------------------------------------------- + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2104/e2e-tests/pitr/conf/backup-minio.yml + /usr/sbin/sed -e 's/name:/name: backup-minio-3/' + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.6naHoq6vLI ++ mktemp + local LAST_ERR=/tmp/tmp.1BDbon4De1 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.6naHoq6vLI perconaservermongodbbackup.psmdb.percona.com/backup-minio-3 created + cat /tmp/tmp.1BDbon4De1 + rm /tmp/tmp.6naHoq6vLI /tmp/tmp.1BDbon4De1 + return 0 + wait_backup backup-minio-3 + local backup_name=backup-minio-3 + local target_state=ready + set +o xtrace waiting for backup-minio-3 to reach ready state......OK + compare_latest_restorable_time some-name-rs0 backup-minio-3 + local cluster=some-name-rs0 + local backup_name=backup-minio-3 + local latest_restorable_time + local backup_time ++ get_latest_restorable_time some-name-rs0 ++ local cluster=some-name-rs0 ++ local first_timestamp ++ local second_timestamp ++ local retry=0 ++ [[ '' != '' ]] +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.FVnbGyOLBX ++++ mktemp +++ local LAST_ERR=/tmp/tmp.mWHPAWr26E +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.FVnbGyOLBX +++ cat /tmp/tmp.mWHPAWr26E +++ rm /tmp/tmp.FVnbGyOLBX /tmp/tmp.mWHPAWr26E +++ return 0 ++ first_timestamp=1762295589 ++ sleep 5 ++ [[ 1762295589 != '' ]] ++ [[ 1762295589 != \n\u\l\l ]] +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.eGzu5vOTL8 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.woCP1tYbid +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.eGzu5vOTL8 +++ cat /tmp/tmp.woCP1tYbid +++ rm /tmp/tmp.eGzu5vOTL8 /tmp/tmp.woCP1tYbid +++ return 0 ++ second_timestamp=1762295589 ++ let retry+=1 ++ [[ 1 -gt 30 ]] ++ [[ 1762295589 != '' ]] ++ [[ 1762295589 != \n\u\l\l ]] ++ [[ 1762295589 == 1762295589 ]] ++ /usr/sbin/date -u -d @1762295589 +%Y-%m-%dT%H:%M:%SZ + latest_restorable_time=2025-11-04T22:33:09Z ++ get_latest_restorable_time_from_backup_object backup-minio-3 ++ local backup_name=backup-minio-3 ++ local latestRestorableTime ++ local retry=0 ++ [[ '' != '' ]] ++ sleep 5 +++ kubectl_bin get psmdb-backup backup-minio-3 -o 'jsonpath={.status.latestRestorableTime}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.E6tK8GJwAi ++++ mktemp +++ local LAST_ERR=/tmp/tmp.hyl7EZVCIh +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get psmdb-backup backup-minio-3 -o 'jsonpath={.status.latestRestorableTime}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.E6tK8GJwAi +++ cat /tmp/tmp.hyl7EZVCIh +++ rm /tmp/tmp.E6tK8GJwAi /tmp/tmp.hyl7EZVCIh +++ return 0 ++ latestRestorableTime=2025-11-04T22:33:09Z ++ let retry+=1 ++ [[ 1 -gt 30 ]] ++ [[ 
2025-11-04T22:33:09Z != '' ]] ++ [[ 2025-11-04T22:33:09Z != \n\u\l\l ]] ++ echo 2025-11-04T22:33:09Z + backup_time=2025-11-04T22:33:09Z + [[ 2025-11-04T22:33:09Z != \2\0\2\5\-\1\1\-\0\4\T\2\2\:\3\3\:\0\9\Z ]] + check_recovery backup-minio-3 latest '' -4th backupSource some-name + local backup_name=backup-minio-3 + local restore_type=latest + local restore_date= + local cmp_postfix=-4th + local backupSource=backupSource + local cluster_name=some-name + local selective_collection= + local restore_name=restore-backup-minio-3 + local restore_file=restore.yml + local cluster + cluster=some-name-rs0 + desc 'write more data before restore by latest' + set +o xtrace ----------------------------------------------------------------------------------- write more data before restore by latest ----------------------------------------------------------------------------------- + sleep 60 + run_mongo 'use myApp\n db.test.insert({ x: 100501 })' myApp:myPass@some-name-rs0.pitr-30181 + local 'command=use myApp\n db.test.insert({ x: 100501 })' + local uri=myApp:myPass@some-name-rs0.pitr-30181 + local driver=mongodb+srv + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.CevhrDs9We +++ mktemp ++ local LAST_ERR=/tmp/tmp.r1rnrOs4rK ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.CevhrDs9We ++ cat /tmp/tmp.r1rnrOs4rK ++ rm /tmp/tmp.CevhrDs9We /tmp/tmp.r1rnrOs4rK ++ return 0 + local client_container=psmdb-client-696897d69b-d5dfh + local mongo_flag= + [[ myApp:myPass@some-name-rs0.pitr-30181 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-696897d69b-d5dfh -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100501 })\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.pitr-30181.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.YTRFDar8PF ++ mktemp + local LAST_ERR=/tmp/tmp.8tqbnhzEP2 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-d5dfh -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100501 })\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.pitr-30181.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.YTRFDar8PF Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-rs0-2.some-name-rs0.pitr-30181.svc.cluster.local:27017,some-name-rs0-0.some-name-rs0.pitr-30181.svc.cluster.local:27017,some-name-rs0-1.some-name-rs0.pitr-30181.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("795937a7-bbcd-4943-a64d-f55ec9f5ecd8") } Percona Server for MongoDB server version: v8.0.12-4 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.8tqbnhzEP2 + rm /tmp/tmp.YTRFDar8PF /tmp/tmp.8tqbnhzEP2 + return 0 + '[' -n '' ']' + desc 'waiting for chunks to be uploaded' + set +o xtrace ----------------------------------------------------------------------------------- waiting for chunks to be uploaded ----------------------------------------------------------------------------------- + sleep 120 + desc 'check restore by 
latest' + set +o xtrace ----------------------------------------------------------------------------------- check restore by latest ----------------------------------------------------------------------------------- + '[' -z backupSource ']' ++ get_backup_dest backup-minio-3 ++ local backup_name=backup-minio-3 ++ kubectl_bin get psmdb-backup backup-minio-3 -o 'jsonpath={.status.destination}' ++ sed -e 's/.json$//' ++ sed 's|s3://||' ++ sed 's|azure://||' ++ sed 's|gs://||' +++ mktemp ++ local LAST_OUT=/tmp/tmp.EuDu5mIQzw +++ mktemp ++ local LAST_ERR=/tmp/tmp.Xna5JhMlzD ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb-backup backup-minio-3 -o 'jsonpath={.status.destination}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.EuDu5mIQzw ++ cat /tmp/tmp.Xna5JhMlzD ++ rm /tmp/tmp.EuDu5mIQzw /tmp/tmp.Xna5JhMlzD ++ return 0 + backup_dest=operator-testing/pitr-prefix-1/2025-11-04T22:33:40Z + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2104/e2e-tests/pitr/conf/restore.yml + /usr/sbin/sed -e 's/name:/name: restore-backup-minio-3/' + /usr/sbin/sed -e /backupName/d + /usr/sbin/sed -e 's/clusterName:/clusterName: some-name/' + '[' -z '' ']' + /usr/sbin/sed -e /date:/d + /usr/sbin/sed -e 's|BACKUP-NAME|operator-testing/pitr-prefix-1/2025-11-04T22:33:40Z|' + /usr/sbin/sed -e 's/pitrType:/type: latest/' + '[' -n '' ']' + yq + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.iHvBlC68O1 ++ mktemp + local LAST_ERR=/tmp/tmp.q9TKxgUumM + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.iHvBlC68O1 perconaservermongodbrestore.psmdb.percona.com/restore-backup-minio-3 created + cat /tmp/tmp.q9TKxgUumM + rm /tmp/tmp.iHvBlC68O1 /tmp/tmp.q9TKxgUumM + return 0 + wait_restore backup-minio-3 some-name + local backup_name=backup-minio-3 + local cluster_name=some-name + local target_state=ready + local wait_cluster_consistency=1 + local wait_time=1780 + local ok_if_ready=0 + set +o xtrace Waiting for the psmdb-restore/restore-backup-minio-3 object to be created.OK Waiting psmdb-restore/restore-backup-minio-3 to reach state "ready" .OK after 0 minutes + [[ 1 -eq 1 ]] + wait_cluster_consistency some-name + local cluster_name=some-name + local wait_time=32 + retry=0 + sleep 7 + echo -n 'waiting for cluster readyness' waiting for cluster readyness++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.XcBS1e7hH6 +++ mktemp ++ local LAST_ERR=/tmp/tmp.cYIDgn6vWv ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.XcBS1e7hH6 ++ cat /tmp/tmp.cYIDgn6vWv ++ rm /tmp/tmp.XcBS1e7hH6 /tmp/tmp.cYIDgn6vWv ++ return 0 + [[ ready == \r\e\a\d\y ]] + echo .OK .OK + echo + set -o xtrace + '[' -n '' ']' + compare_mongo_cmd find myApp:myPass@some-name-rs0-0.some-name-rs0.pitr-30181 -4th + local command=find + local uri=myApp:myPass@some-name-rs0-0.some-name-rs0.pitr-30181 + local postfix=-4th + local suffix= + local database=myApp + local collection=test + local sort= + local tls=false + local 'full_command=db.test.find()' + [[ -n '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-11-04T22:37:37+0000] running db.test.find() in myApp + [[ false == \t\r\u\e ]] + 
mongo_command=run_mongo + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-0.some-name-rs0.pitr-30181 mongodb '' + local 'command=use myApp\n db.test.find()' + grep -E -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' + local uri=myApp:myPass@some-name-rs0-0.some-name-rs0.pitr-30181 + local driver=mongodb + local suffix=.svc.cluster.local + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.TvOIsm8KVR +++ mktemp ++ local LAST_ERR=/tmp/tmp.mga05bgdc0 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.TvOIsm8KVR ++ cat /tmp/tmp.mga05bgdc0 ++ rm /tmp/tmp.TvOIsm8KVR /tmp/tmp.mga05bgdc0 ++ return 0 + local client_container=psmdb-client-696897d69b-d5dfh + local mongo_flag= + [[ myApp:myPass@some-name-rs0-0.some-name-rs0.pitr-30181 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-696897d69b-d5dfh -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-0.some-name-rs0.pitr-30181.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.WLBGFNmY7e ++ mktemp + local LAST_ERR=/tmp/tmp.5mDoeW6bZu + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-d5dfh -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-0.some-name-rs0.pitr-30181.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.WLBGFNmY7e + cat /tmp/tmp.5mDoeW6bZu + rm /tmp/tmp.WLBGFNmY7e /tmp/tmp.5mDoeW6bZu + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2104/e2e-tests/pitr/compare/find-4th.json /tmp/tmp.wGkpxEBpK1/find-4th + compare_mongo_cmd find myApp:myPass@some-name-rs0-1.some-name-rs0.pitr-30181 -4th + local command=find + local uri=myApp:myPass@some-name-rs0-1.some-name-rs0.pitr-30181 + local postfix=-4th + local suffix= + local database=myApp + local collection=test + local sort= + local tls=false + local 'full_command=db.test.find()' + [[ -n '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-11-04T22:37:40+0000] running db.test.find() in myApp + [[ false == \t\r\u\e ]] + mongo_command=run_mongo + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-1.some-name-rs0.pitr-30181 mongodb '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-1.some-name-rs0.pitr-30181 + local driver=mongodb + local suffix=.svc.cluster.local + grep -E -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' +++ mktemp ++ local LAST_OUT=/tmp/tmp.rz646azAMk +++ mktemp ++ local 
LAST_ERR=/tmp/tmp.N0e0r0ArmE ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.rz646azAMk ++ cat /tmp/tmp.N0e0r0ArmE ++ rm /tmp/tmp.rz646azAMk /tmp/tmp.N0e0r0ArmE ++ return 0 + local client_container=psmdb-client-696897d69b-d5dfh + local mongo_flag= + [[ myApp:myPass@some-name-rs0-1.some-name-rs0.pitr-30181 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-696897d69b-d5dfh -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-1.some-name-rs0.pitr-30181.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.6IrTW6rh4D ++ mktemp + local LAST_ERR=/tmp/tmp.IsPX2tFYq7 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-d5dfh -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-1.some-name-rs0.pitr-30181.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.6IrTW6rh4D + cat /tmp/tmp.IsPX2tFYq7 + rm /tmp/tmp.6IrTW6rh4D /tmp/tmp.IsPX2tFYq7 + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2104/e2e-tests/pitr/compare/find-4th.json /tmp/tmp.wGkpxEBpK1/find-4th + compare_mongo_cmd find myApp:myPass@some-name-rs0-2.some-name-rs0.pitr-30181 -4th + local command=find + local uri=myApp:myPass@some-name-rs0-2.some-name-rs0.pitr-30181 + local postfix=-4th + local suffix= + local database=myApp + local collection=test + local sort= + local tls=false + local 'full_command=db.test.find()' + [[ -n '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-11-04T22:37:42+0000] running db.test.find() in myApp + [[ false == \t\r\u\e ]] + mongo_command=run_mongo + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-2.some-name-rs0.pitr-30181 mongodb '' + grep -E -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-2.some-name-rs0.pitr-30181 + local driver=mongodb + local suffix=.svc.cluster.local + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.EIOijHm05J +++ mktemp ++ local LAST_ERR=/tmp/tmp.e3LlAhjOg5 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.EIOijHm05J ++ cat /tmp/tmp.e3LlAhjOg5 ++ rm /tmp/tmp.EIOijHm05J /tmp/tmp.e3LlAhjOg5 ++ return 0 + local client_container=psmdb-client-696897d69b-d5dfh + local mongo_flag= + [[ myApp:myPass@some-name-rs0-2.some-name-rs0.pitr-30181 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-696897d69b-d5dfh -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-2.some-name-rs0.pitr-30181.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local 
LAST_OUT=/tmp/tmp.bCJXVZFoxG ++ mktemp + local LAST_ERR=/tmp/tmp.6uHhg0ase9 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-d5dfh -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-2.some-name-rs0.pitr-30181.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.bCJXVZFoxG + cat /tmp/tmp.6uHhg0ase9 + rm /tmp/tmp.bCJXVZFoxG /tmp/tmp.6uHhg0ase9 + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2104/e2e-tests/pitr/compare/find-4th.json /tmp/tmp.wGkpxEBpK1/find-4th + desc 'restore pitr using backupSource on second cluster with another prefix in storage' + set +o xtrace ----------------------------------------------------------------------------------- restore pitr using backupSource on second cluster with another prefix in storage ----------------------------------------------------------------------------------- + second_cluster=cluster2-rs0 + apply_cluster /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2104/e2e-tests/pitr/conf/cluster2-rs0.yml + '[' -z '' ']' + cat_config /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2104/e2e-tests/pitr/conf/cluster2-rs0.yml + kubectl_bin apply -f - + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2104/e2e-tests/pitr/conf/cluster2-rs0.yml ++ mktemp + yq eval '(.spec | select(.image == null)).image = "perconalab/percona-server-mongodb-operator:main-mongod8.0"' + yq eval '(.spec | select(has("pmm"))).pmm.image = "percona/pmm-client:2.44.1-1"' + yq eval '(.spec | select(has("initImage"))).initImage = "perconalab/percona-server-mongodb-operator:PR-2104-591a1b50"' + yq eval '(.spec | select(has("backup"))).backup.image = "perconalab/percona-server-mongodb-operator:main-backup"' + yq eval '.spec.upgradeOptions.apply="Never"' + local LAST_OUT=/tmp/tmp.c8LoO1sGwk ++ mktemp + local LAST_ERR=/tmp/tmp.tLfLPxgPcJ + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.c8LoO1sGwk perconaservermongodb.psmdb.percona.com/cluster2 created + cat /tmp/tmp.tLfLPxgPcJ + rm /tmp/tmp.c8LoO1sGwk /tmp/tmp.tLfLPxgPcJ + return 0 + desc 'check if all 3 Pods started' + set +o xtrace ----------------------------------------------------------------------------------- check if all 3 Pods started ----------------------------------------------------------------------------------- + wait_for_running cluster2-rs0 3 + local name=cluster2-rs0 + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=cluster2 ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod cluster2-rs0-0 + local pod=cluster2-rs0-0 + set +o xtrace waiting for pod/cluster2-rs0-0 to be ready......OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod cluster2-rs0-1 + local pod=cluster2-rs0-1 + set +o xtrace waiting for pod/cluster2-rs0-1 to be ready......OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb cluster2 -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.C0dsHDprqh +++ mktemp ++ local LAST_ERR=/tmp/tmp.bWh7lg9hBx ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb cluster2 -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ 
exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.C0dsHDprqh ++ cat /tmp/tmp.bWh7lg9hBx ++ rm /tmp/tmp.C0dsHDprqh /tmp/tmp.bWh7lg9hBx ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod cluster2-rs0-2 + local pod=cluster2-rs0-2 + set +o xtrace waiting for pod/cluster2-rs0-2 to be ready.......OK ++ kubectl_bin get psmdb cluster2 -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.sithMqxvPY +++ mktemp ++ local LAST_ERR=/tmp/tmp.FUqxAdN9EK ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb cluster2 -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.sithMqxvPY ++ cat /tmp/tmp.FUqxAdN9EK ++ rm /tmp/tmp.sithMqxvPY /tmp/tmp.FUqxAdN9EK ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb cluster2 -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.taT2RU3hcD +++ mktemp ++ local LAST_ERR=/tmp/tmp.sAxK5DRsdQ ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb cluster2 -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.taT2RU3hcD ++ cat /tmp/tmp.sAxK5DRsdQ ++ rm /tmp/tmp.taT2RU3hcD /tmp/tmp.sAxK5DRsdQ ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness....... + wait_backup_agent cluster2-rs0-0 + local agent_pod=cluster2-rs0-0 + set +o xtrace waiting for pbm-agent to be ready in cluster2-rs0-0...2025-11-04T22:39:12.000+0000 I listening for the commands + wait_backup_agent cluster2-rs0-1 + local agent_pod=cluster2-rs0-1 + set +o xtrace waiting for pbm-agent to be ready in cluster2-rs0-1...2025-11-04T22:39:19.000+0000 I listening for the commands + wait_backup_agent cluster2-rs0-2 + local agent_pod=cluster2-rs0-2 + set +o xtrace waiting for pbm-agent to be ready in cluster2-rs0-2...2025-11-04T22:39:21.000+0000 I listening for the commands + desc 'create user myApp' + set +o xtrace ----------------------------------------------------------------------------------- create user myApp ----------------------------------------------------------------------------------- + run_mongo 'db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})' userAdmin:userAdmin123456@cluster2-rs0.pitr-30181 + local 'command=db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})' + local uri=userAdmin:userAdmin123456@cluster2-rs0.pitr-30181 + local driver=mongodb+srv + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.vIo0gLzXvZ +++ mktemp ++ local LAST_ERR=/tmp/tmp.3AY3iIhtb9 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.vIo0gLzXvZ ++ cat /tmp/tmp.3AY3iIhtb9 ++ rm /tmp/tmp.vIo0gLzXvZ /tmp/tmp.3AY3iIhtb9 ++ return 0 + local client_container=psmdb-client-696897d69b-d5dfh + local mongo_flag= + [[ userAdmin:userAdmin123456@cluster2-rs0.pitr-30181 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-696897d69b-d5dfh -- bash -c 'printf 
'\''db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})\n'\'' | mongo mongodb+srv://userAdmin:userAdmin123456@cluster2-rs0.pitr-30181.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.G2bnBc3Pox ++ mktemp + local LAST_ERR=/tmp/tmp.elEVTCbAYK + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-d5dfh -- bash -c 'printf '\''db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})\n'\'' | mongo mongodb+srv://userAdmin:userAdmin123456@cluster2-rs0.pitr-30181.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.G2bnBc3Pox Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://cluster2-rs0-0.cluster2-rs0.pitr-30181.svc.cluster.local:27017,cluster2-rs0-1.cluster2-rs0.pitr-30181.svc.cluster.local:27017,cluster2-rs0-2.cluster2-rs0.pitr-30181.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("8b2bce35-9dd2-482c-8bc1-43113f6a77f1") } Percona Server for MongoDB server version: v8.0.12-4 WARNING: shell and server versions do not match Successfully added user: { "user" : "myApp", "roles" : [ { "db" : "myApp", "role" : "readWrite" } ] } bye + cat /tmp/tmp.elEVTCbAYK + rm /tmp/tmp.G2bnBc3Pox /tmp/tmp.elEVTCbAYK + return 0 + sleep 2 + check_recovery backup-minio-3 latest '' -4th backupSource cluster2 '' backup-minio-3-second-cluster restore2.yml + local backup_name=backup-minio-3 + local restore_type=latest + local restore_date= + local cmp_postfix=-4th + local backupSource=backupSource + local cluster_name=cluster2 + local selective_collection= + local restore_name=restore-backup-minio-3-second-cluster + local restore_file=restore2.yml + local cluster + cluster=cluster2-rs0 + desc 'write more data before restore by latest' + set +o xtrace ----------------------------------------------------------------------------------- write more data before restore by latest ----------------------------------------------------------------------------------- + sleep 60 + run_mongo 'use myApp\n db.test.insert({ x: 100501 })' myApp:myPass@cluster2-rs0.pitr-30181 + local 'command=use myApp\n db.test.insert({ x: 100501 })' + local uri=myApp:myPass@cluster2-rs0.pitr-30181 + local driver=mongodb+srv + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.jWrYXJZjWV +++ mktemp ++ local LAST_ERR=/tmp/tmp.BPh9MvUZAQ ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.jWrYXJZjWV ++ cat /tmp/tmp.BPh9MvUZAQ ++ rm /tmp/tmp.jWrYXJZjWV /tmp/tmp.BPh9MvUZAQ ++ return 0 + local client_container=psmdb-client-696897d69b-d5dfh + local mongo_flag= + [[ myApp:myPass@cluster2-rs0.pitr-30181 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-696897d69b-d5dfh -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100501 })\n'\'' | mongo mongodb+srv://myApp:myPass@cluster2-rs0.pitr-30181.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.6dMCG3BMbO ++ mktemp + local LAST_ERR=/tmp/tmp.aTshM7Vdi5 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i 
in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-d5dfh -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100501 })\n'\'' | mongo mongodb+srv://myApp:myPass@cluster2-rs0.pitr-30181.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.6dMCG3BMbO Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://cluster2-rs0-0.cluster2-rs0.pitr-30181.svc.cluster.local:27017,cluster2-rs0-1.cluster2-rs0.pitr-30181.svc.cluster.local:27017,cluster2-rs0-2.cluster2-rs0.pitr-30181.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("c3c85b5b-a780-4d4d-abe4-eb2b3926101c") } Percona Server for MongoDB server version: v8.0.12-4 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.aTshM7Vdi5 + rm /tmp/tmp.6dMCG3BMbO /tmp/tmp.aTshM7Vdi5 + return 0 + '[' -n '' ']' + desc 'waiting for chunks to be uploaded' + set +o xtrace ----------------------------------------------------------------------------------- waiting for chunks to be uploaded ----------------------------------------------------------------------------------- + sleep 120 + desc 'check restore by latest' + set +o xtrace ----------------------------------------------------------------------------------- check restore by latest ----------------------------------------------------------------------------------- + '[' -z backupSource ']' ++ get_backup_dest backup-minio-3 ++ local backup_name=backup-minio-3 ++ kubectl_bin get psmdb-backup backup-minio-3 -o 'jsonpath={.status.destination}' ++ sed -e 's/.json$//' +++ mktemp ++ sed 's|s3://||' ++ sed 's|azure://||' ++ sed 's|gs://||' ++ local LAST_OUT=/tmp/tmp.yg8q1B5OT6 +++ mktemp ++ local LAST_ERR=/tmp/tmp.Jddsnk5xNf ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb-backup backup-minio-3 -o 'jsonpath={.status.destination}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.yg8q1B5OT6 ++ cat /tmp/tmp.Jddsnk5xNf ++ rm /tmp/tmp.yg8q1B5OT6 /tmp/tmp.Jddsnk5xNf ++ return 0 + backup_dest=operator-testing/pitr-prefix-1/2025-11-04T22:33:40Z + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2104/e2e-tests/pitr/conf/restore2.yml + /usr/sbin/sed -e 's/name:/name: restore-backup-minio-3-second-cluster/' + /usr/sbin/sed -e /backupName/d + /usr/sbin/sed -e 's/clusterName:/clusterName: cluster2/' + /usr/sbin/sed -e 's/pitrType:/type: latest/' + '[' -z '' ']' + /usr/sbin/sed -e /date:/d + /usr/sbin/sed -e 's|BACKUP-NAME|operator-testing/pitr-prefix-1/2025-11-04T22:33:40Z|' + kubectl_bin apply -f - + '[' -n '' ']' + yq ++ mktemp + local LAST_OUT=/tmp/tmp.7TtoYszS8I ++ mktemp + local LAST_ERR=/tmp/tmp.ZuToDBElkW + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.7TtoYszS8I perconaservermongodbrestore.psmdb.percona.com/restore-backup-minio-3-second-cluster created + cat /tmp/tmp.ZuToDBElkW + rm /tmp/tmp.7TtoYszS8I /tmp/tmp.ZuToDBElkW + return 0 + wait_restore backup-minio-3-second-cluster cluster2 + local backup_name=backup-minio-3-second-cluster + local cluster_name=cluster2 + local target_state=ready + local wait_cluster_consistency=1 + local wait_time=1780 + local ok_if_ready=0 + set +o xtrace Waiting for the 
psmdb-restore/restore-backup-minio-3-second-cluster object to be created.OK Waiting psmdb-restore/restore-backup-minio-3-second-cluster to reach state "ready" .OK after 0 minutes + [[ 1 -eq 1 ]] + wait_cluster_consistency cluster2 + local cluster_name=cluster2 + local wait_time=32 + retry=0 + sleep 7 + echo -n 'waiting for cluster readyness' waiting for cluster readyness++ kubectl_bin get psmdb cluster2 -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.VTtl63YXTf +++ mktemp ++ local LAST_ERR=/tmp/tmp.gBZsIJ4C7f ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb cluster2 -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.VTtl63YXTf ++ cat /tmp/tmp.gBZsIJ4C7f ++ rm /tmp/tmp.VTtl63YXTf /tmp/tmp.gBZsIJ4C7f ++ return 0 + [[ ready == \r\e\a\d\y ]] + echo .OK .OK + echo + set -o xtrace + '[' -n '' ']' + compare_mongo_cmd find myApp:myPass@cluster2-rs0-0.cluster2-rs0.pitr-30181 -4th + local command=find + local uri=myApp:myPass@cluster2-rs0-0.cluster2-rs0.pitr-30181 + local postfix=-4th + local suffix= + local database=myApp + local collection=test + local sort= + local tls=false + local 'full_command=db.test.find()' + [[ -n '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-11-04T22:43:06+0000] running db.test.find() in myApp + [[ false == \t\r\u\e ]] + mongo_command=run_mongo + run_mongo 'use myApp\n db.test.find()' myApp:myPass@cluster2-rs0-0.cluster2-rs0.pitr-30181 mongodb '' + grep -E -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@cluster2-rs0-0.cluster2-rs0.pitr-30181 + local driver=mongodb + local suffix=.svc.cluster.local + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.2vTBBjrsnB +++ mktemp ++ local LAST_ERR=/tmp/tmp.QU7KJUWlpT ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.2vTBBjrsnB ++ cat /tmp/tmp.QU7KJUWlpT ++ rm /tmp/tmp.2vTBBjrsnB /tmp/tmp.QU7KJUWlpT ++ return 0 + local client_container=psmdb-client-696897d69b-d5dfh + local mongo_flag= + [[ myApp:myPass@cluster2-rs0-0.cluster2-rs0.pitr-30181 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-696897d69b-d5dfh -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@cluster2-rs0-0.cluster2-rs0.pitr-30181.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.DqBODaqd3C ++ mktemp + local LAST_ERR=/tmp/tmp.dVVoFW4FCR + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-d5dfh -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@cluster2-rs0-0.cluster2-rs0.pitr-30181.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.DqBODaqd3C + cat /tmp/tmp.dVVoFW4FCR + rm /tmp/tmp.DqBODaqd3C /tmp/tmp.dVVoFW4FCR + return 0 + [[ 0 -eq 0 ]] 
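The verification loop running here against each cluster2 pod is the same compare_mongo_cmd pattern used throughout this test: execute db.test.find() through the psmdb-client pod, filter out shell and driver noise, blank out volatile fields, and diff the normalized output (next trace line) against the golden file. A condensed sketch of that pipeline, with the client pod name, sed normalization, and golden-file path taken from this run, the grep noise filter abbreviated, and compare_find standing in for the test's compare_mongo_cmd/run_mongo helpers:

    # Condensed sketch of the compare_mongo_cmd/run_mongo pipeline: query via
    # the client pod, normalize volatile output, diff against the golden file.
    compare_find() {
        local uri=$1 golden=$2
        kubectl exec psmdb-client-696897d69b-d5dfh -- bash -c \
            "printf 'use myApp\n db.test.find()\n' | mongo \"mongodb://${uri}.svc.cluster.local/admin?ssl=false&replicaSet=rs0\"" \
            | grep -E -v 'I NETWORK|W NETWORK|F NETWORK|Percona Server for MongoDB|connecting to:|Implicit session:|versions do not match' \
            | sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' \
            | diff -u "$golden" -
    }

    compare_find myApp:myPass@cluster2-rs0-0.cluster2-rs0.pitr-30181 \
        /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2104/e2e-tests/pitr/compare/find-4th.json

The trace below shows this check passing on all three cluster2 pods against find-4th.json, confirming that the backupSource restore from the first cluster's pitr-prefix-1 storage prefix landed the expected data on the second cluster.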
+ diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2104/e2e-tests/pitr/compare/find-4th.json /tmp/tmp.wGkpxEBpK1/find-4th + compare_mongo_cmd find myApp:myPass@cluster2-rs0-1.cluster2-rs0.pitr-30181 -4th + local command=find + local uri=myApp:myPass@cluster2-rs0-1.cluster2-rs0.pitr-30181 + local postfix=-4th + local suffix= + local database=myApp + local collection=test + local sort= + local tls=false + local 'full_command=db.test.find()' + [[ -n '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-11-04T22:43:08+0000] running db.test.find() in myApp + [[ false == \t\r\u\e ]] + mongo_command=run_mongo + run_mongo 'use myApp\n db.test.find()' myApp:myPass@cluster2-rs0-1.cluster2-rs0.pitr-30181 mongodb '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@cluster2-rs0-1.cluster2-rs0.pitr-30181 + local driver=mongodb + local suffix=.svc.cluster.local + grep -E -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ZyEWJ4Pcz9 +++ mktemp ++ local LAST_ERR=/tmp/tmp.ZcYck9ffFI ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.ZyEWJ4Pcz9 ++ cat /tmp/tmp.ZcYck9ffFI ++ rm /tmp/tmp.ZyEWJ4Pcz9 /tmp/tmp.ZcYck9ffFI ++ return 0 + local client_container=psmdb-client-696897d69b-d5dfh + local mongo_flag= + [[ myApp:myPass@cluster2-rs0-1.cluster2-rs0.pitr-30181 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-696897d69b-d5dfh -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@cluster2-rs0-1.cluster2-rs0.pitr-30181.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.EzaWszTahU ++ mktemp + local LAST_ERR=/tmp/tmp.9ARw5fIRPD + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-d5dfh -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@cluster2-rs0-1.cluster2-rs0.pitr-30181.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.EzaWszTahU + cat /tmp/tmp.9ARw5fIRPD + rm /tmp/tmp.EzaWszTahU /tmp/tmp.9ARw5fIRPD + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2104/e2e-tests/pitr/compare/find-4th.json /tmp/tmp.wGkpxEBpK1/find-4th + compare_mongo_cmd find myApp:myPass@cluster2-rs0-2.cluster2-rs0.pitr-30181 -4th + local command=find + local uri=myApp:myPass@cluster2-rs0-2.cluster2-rs0.pitr-30181 + local postfix=-4th + local suffix= + local database=myApp + local collection=test + local sort= + local tls=false + local 'full_command=db.test.find()' + [[ -n '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-11-04T22:43:11+0000] running db.test.find() in myApp + [[ false == \t\r\u\e ]] + mongo_command=run_mongo + grep -E -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not 
match|Error saving history file:|Started a new thread for the timer service' + run_mongo 'use myApp\n db.test.find()' myApp:myPass@cluster2-rs0-2.cluster2-rs0.pitr-30181 mongodb '' + local 'command=use myApp\n db.test.find()' + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' + local uri=myApp:myPass@cluster2-rs0-2.cluster2-rs0.pitr-30181 + local driver=mongodb + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.w1FEvcSMcK +++ mktemp ++ local LAST_ERR=/tmp/tmp.heFIXbvZBG ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.w1FEvcSMcK ++ cat /tmp/tmp.heFIXbvZBG ++ rm /tmp/tmp.w1FEvcSMcK /tmp/tmp.heFIXbvZBG ++ return 0 + local client_container=psmdb-client-696897d69b-d5dfh + local mongo_flag= + [[ myApp:myPass@cluster2-rs0-2.cluster2-rs0.pitr-30181 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-696897d69b-d5dfh -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@cluster2-rs0-2.cluster2-rs0.pitr-30181.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.DFEQblsPsF ++ mktemp + local LAST_ERR=/tmp/tmp.IW4jHigYha + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-d5dfh -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@cluster2-rs0-2.cluster2-rs0.pitr-30181.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.DFEQblsPsF + cat /tmp/tmp.IW4jHigYha + rm /tmp/tmp.DFEQblsPsF /tmp/tmp.IW4jHigYha + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2104/e2e-tests/pitr/compare/find-4th.json /tmp/tmp.wGkpxEBpK1/find-4th + backup_name_minio_2=backup-minio2 + run_backup backup-minio2 0 + local name=backup-minio2 + local idx=0 + desc 'run backup backup-minio2-0' + set +o xtrace ----------------------------------------------------------------------------------- run backup backup-minio2-0 ----------------------------------------------------------------------------------- + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2104/e2e-tests/pitr/conf/backup-minio2.yml + kubectl_bin apply -f - + /usr/sbin/sed -e 's/name:/name: backup-minio2-0/' ++ mktemp + local LAST_OUT=/tmp/tmp.4LQx5Eesg8 ++ mktemp + local LAST_ERR=/tmp/tmp.usfBH6WWBi + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.4LQx5Eesg8 perconaservermongodbbackup.psmdb.percona.com/backup-minio2-0 created + cat /tmp/tmp.usfBH6WWBi + rm /tmp/tmp.4LQx5Eesg8 /tmp/tmp.usfBH6WWBi + return 0 + wait_backup backup-minio2-0 + local backup_name=backup-minio2-0 + local target_state=ready + set +o xtrace waiting for backup-minio2-0 to reach ready state......OK ++ run_mongo 'new Date().toISOString()' myApp:myPass@cluster2-rs0.pitr-30181 mongodb '' --quiet ++ local 'command=new Date().toISOString()' ++ local uri=myApp:myPass@cluster2-rs0.pitr-30181 ++ local driver=mongodb ++ local suffix=.svc.cluster.local ++ grep -E -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to 
reach primary for set|Implicit session:|versions do not match' ++ cut -c1-19 +++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ tr T ' ' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.2fNbGGOb1C ++++ mktemp +++ local LAST_ERR=/tmp/tmp.K4clvc4GTy +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.2fNbGGOb1C +++ cat /tmp/tmp.K4clvc4GTy +++ rm /tmp/tmp.2fNbGGOb1C /tmp/tmp.K4clvc4GTy +++ return 0 ++ local client_container=psmdb-client-696897d69b-d5dfh ++ local mongo_flag=--quiet ++ [[ myApp:myPass@cluster2-rs0.pitr-30181 == *cfg* ]] ++ replica_set=rs0 ++ kubectl_bin exec psmdb-client-696897d69b-d5dfh -- bash -c 'printf '\''new Date().toISOString()\n'\'' | mongo mongodb://myApp:myPass@cluster2-rs0.pitr-30181.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 --quiet' +++ mktemp ++ local LAST_OUT=/tmp/tmp.5g6B51cOo6 +++ mktemp ++ local LAST_ERR=/tmp/tmp.d1EGik65hE ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl exec psmdb-client-696897d69b-d5dfh -- bash -c 'printf '\''new Date().toISOString()\n'\'' | mongo mongodb://myApp:myPass@cluster2-rs0.pitr-30181.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 --quiet' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.5g6B51cOo6 ++ cat /tmp/tmp.d1EGik65hE ++ rm /tmp/tmp.5g6B51cOo6 /tmp/tmp.d1EGik65hE ++ return 0 + time_now='2025-11-04 22:43:30' + write_document -5th cluster2-rs0 + local cmp_postfix=-5th + local cluster_name=cluster2-rs0 + desc 'write initial data, read from all' + set +o xtrace ----------------------------------------------------------------------------------- write initial data, read from all ----------------------------------------------------------------------------------- + run_mongo 'use myApp\n db.test.insert({ x: 100500 })' myApp:myPass@cluster2-rs0.pitr-30181 + local 'command=use myApp\n db.test.insert({ x: 100500 })' + local uri=myApp:myPass@cluster2-rs0.pitr-30181 + local driver=mongodb+srv + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.DGQZA7OoNt +++ mktemp ++ local LAST_ERR=/tmp/tmp.JVReBHpEeX ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.DGQZA7OoNt ++ cat /tmp/tmp.JVReBHpEeX ++ rm /tmp/tmp.DGQZA7OoNt /tmp/tmp.JVReBHpEeX ++ return 0 + local client_container=psmdb-client-696897d69b-d5dfh + local mongo_flag= + [[ myApp:myPass@cluster2-rs0.pitr-30181 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-696897d69b-d5dfh -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb+srv://myApp:myPass@cluster2-rs0.pitr-30181.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.owJliWk2PZ ++ mktemp + local LAST_ERR=/tmp/tmp.gwducFVnYT + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-d5dfh -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo 
mongodb+srv://myApp:myPass@cluster2-rs0.pitr-30181.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.owJliWk2PZ Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://cluster2-rs0-0.cluster2-rs0.pitr-30181.svc.cluster.local:27017,cluster2-rs0-1.cluster2-rs0.pitr-30181.svc.cluster.local:27017,cluster2-rs0-2.cluster2-rs0.pitr-30181.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("f6f85b44-2c5e-48fa-ad75-a18f1064de51") } Percona Server for MongoDB server version: v8.0.12-4 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.gwducFVnYT + rm /tmp/tmp.owJliWk2PZ /tmp/tmp.gwducFVnYT + return 0 + minikube_sleep + sleep_time=10 + [[ '' == 1 ]] + compare_mongo_cmd find myApp:myPass@cluster2-rs0-0.cluster2-rs0.pitr-30181 -5th + local command=find + local uri=myApp:myPass@cluster2-rs0-0.cluster2-rs0.pitr-30181 + local postfix=-5th + local suffix= + local database=myApp + local collection=test + local sort= + local tls=false + local 'full_command=db.test.find()' + [[ -n '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-11-04T22:43:33+0000] running db.test.find() in myApp + [[ false == \t\r\u\e ]] + mongo_command=run_mongo + run_mongo 'use myApp\n db.test.find()' myApp:myPass@cluster2-rs0-0.cluster2-rs0.pitr-30181 mongodb '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@cluster2-rs0-0.cluster2-rs0.pitr-30181 + local driver=mongodb + grep -E -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' + local suffix=.svc.cluster.local + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.s5bnzu14DI +++ mktemp ++ local LAST_ERR=/tmp/tmp.SxVwDSDuC2 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.s5bnzu14DI ++ cat /tmp/tmp.SxVwDSDuC2 ++ rm /tmp/tmp.s5bnzu14DI /tmp/tmp.SxVwDSDuC2 ++ return 0 + local client_container=psmdb-client-696897d69b-d5dfh + local mongo_flag= + [[ myApp:myPass@cluster2-rs0-0.cluster2-rs0.pitr-30181 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-696897d69b-d5dfh -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@cluster2-rs0-0.cluster2-rs0.pitr-30181.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.E9J0GQ0u2t ++ mktemp + local LAST_ERR=/tmp/tmp.WJ35u3cRi7 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-d5dfh -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@cluster2-rs0-0.cluster2-rs0.pitr-30181.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.E9J0GQ0u2t + cat /tmp/tmp.WJ35u3cRi7 + rm /tmp/tmp.E9J0GQ0u2t /tmp/tmp.WJ35u3cRi7 + return 0 + [[ 0 -eq 0 ]] + diff -u 
/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2104/e2e-tests/pitr/compare/find-5th.json /tmp/tmp.wGkpxEBpK1/find-5th + compare_mongo_cmd find myApp:myPass@cluster2-rs0-1.cluster2-rs0.pitr-30181 -5th + local command=find + local uri=myApp:myPass@cluster2-rs0-1.cluster2-rs0.pitr-30181 + local postfix=-5th + local suffix= + local database=myApp + local collection=test + local sort= + local tls=false + local 'full_command=db.test.find()' + [[ -n '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-11-04T22:43:36+0000] running db.test.find() in myApp + [[ false == \t\r\u\e ]] + mongo_command=run_mongo + run_mongo 'use myApp\n db.test.find()' myApp:myPass@cluster2-rs0-1.cluster2-rs0.pitr-30181 mongodb '' + grep -E -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' + local 'command=use myApp\n db.test.find()' + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' + local uri=myApp:myPass@cluster2-rs0-1.cluster2-rs0.pitr-30181 + local driver=mongodb + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.U9xMsHuxLN +++ mktemp ++ local LAST_ERR=/tmp/tmp.gpAIUUbpQ0 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.U9xMsHuxLN ++ cat /tmp/tmp.gpAIUUbpQ0 ++ rm /tmp/tmp.U9xMsHuxLN /tmp/tmp.gpAIUUbpQ0 ++ return 0 + local client_container=psmdb-client-696897d69b-d5dfh + local mongo_flag= + [[ myApp:myPass@cluster2-rs0-1.cluster2-rs0.pitr-30181 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-696897d69b-d5dfh -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@cluster2-rs0-1.cluster2-rs0.pitr-30181.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.o6qLE3UiuE ++ mktemp + local LAST_ERR=/tmp/tmp.lTRDkZzdXK + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-d5dfh -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@cluster2-rs0-1.cluster2-rs0.pitr-30181.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.o6qLE3UiuE + cat /tmp/tmp.lTRDkZzdXK + rm /tmp/tmp.o6qLE3UiuE /tmp/tmp.lTRDkZzdXK + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2104/e2e-tests/pitr/compare/find-5th.json /tmp/tmp.wGkpxEBpK1/find-5th + compare_mongo_cmd find myApp:myPass@cluster2-rs0-2.cluster2-rs0.pitr-30181 -5th + local command=find + local uri=myApp:myPass@cluster2-rs0-2.cluster2-rs0.pitr-30181 + local postfix=-5th + local suffix= + local database=myApp + local collection=test + local sort= + local tls=false + local 'full_command=db.test.find()' + [[ -n '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-11-04T22:43:39+0000] running db.test.find() in myApp + [[ false == \t\r\u\e ]] + mongo_command=run_mongo + run_mongo 'use myApp\n db.test.find()' myApp:myPass@cluster2-rs0-2.cluster2-rs0.pitr-30181 mongodb '' + grep -E -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server 
for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@cluster2-rs0-2.cluster2-rs0.pitr-30181 + local driver=mongodb + local suffix=.svc.cluster.local + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.0W6LNgeR3Z +++ mktemp ++ local LAST_ERR=/tmp/tmp.JMQFEdleIc ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.0W6LNgeR3Z ++ cat /tmp/tmp.JMQFEdleIc ++ rm /tmp/tmp.0W6LNgeR3Z /tmp/tmp.JMQFEdleIc ++ return 0 + local client_container=psmdb-client-696897d69b-d5dfh + local mongo_flag= + [[ myApp:myPass@cluster2-rs0-2.cluster2-rs0.pitr-30181 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-696897d69b-d5dfh -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@cluster2-rs0-2.cluster2-rs0.pitr-30181.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.vhH9KiM27w ++ mktemp + local LAST_ERR=/tmp/tmp.ZbDDQD8k1s + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-d5dfh -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@cluster2-rs0-2.cluster2-rs0.pitr-30181.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.vhH9KiM27w + cat /tmp/tmp.ZbDDQD8k1s + rm /tmp/tmp.vhH9KiM27w /tmp/tmp.ZbDDQD8k1s + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2104/e2e-tests/pitr/compare/find-5th.json /tmp/tmp.wGkpxEBpK1/find-5th + sleep 2 + check_recovery backup-minio2-0 date '2025-11-04 22:43:30' -4th '' cluster2 + local backup_name=backup-minio2-0 + local restore_type=date + local 'restore_date=2025-11-04 22:43:30' + local cmp_postfix=-4th + local backupSource= + local cluster_name=cluster2 + local selective_collection= + local restore_name=restore-backup-minio2-0 + local restore_file=restore.yml + local cluster + cluster=cluster2-rs0 + desc 'write more data before restore by date' + set +o xtrace ----------------------------------------------------------------------------------- write more data before restore by date ----------------------------------------------------------------------------------- + sleep 60 + run_mongo 'use myApp\n db.test.insert({ x: 100501 })' myApp:myPass@cluster2-rs0.pitr-30181 + local 'command=use myApp\n db.test.insert({ x: 100501 })' + local uri=myApp:myPass@cluster2-rs0.pitr-30181 + local driver=mongodb+srv + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.oSYrILFH9B +++ mktemp ++ local LAST_ERR=/tmp/tmp.0sY7xMHxEK ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.oSYrILFH9B ++ cat /tmp/tmp.0sY7xMHxEK ++ rm /tmp/tmp.oSYrILFH9B /tmp/tmp.0sY7xMHxEK ++ return 0 
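NOTE: check_recovery, entered just above, drives the point-in-time scenario: time_now ("2025-11-04 22:43:30") was captured from the primary right after backup-minio2-0 completed, the write that continues below adds { x: 100501 } after that timestamp, the script then sleeps so PBM can upload the oplog chunks, and finally a restore of type "date" is applied. Success means the data set matches find-4th.json again, i.e. the post-timestamp write has been rolled back. Judging by the sed substitutions later in the trace, the templated conf/restore.yml resolves to roughly the manifest below; the exact field nesting is an assumption based on the operator's documented PerconaServerMongoDBRestore schema, not copied from the repo:

kubectl apply -f - <<EOF
apiVersion: psmdb.percona.com/v1
kind: PerconaServerMongoDBRestore
metadata:
  name: restore-backup-minio2-0
spec:
  clusterName: cluster2
  backupName: backup-minio2-0
  pitr:
    type: date
    date: "2025-11-04 22:43:30"
EOF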
+ local client_container=psmdb-client-696897d69b-d5dfh + local mongo_flag= + [[ myApp:myPass@cluster2-rs0.pitr-30181 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-696897d69b-d5dfh -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100501 })\n'\'' | mongo mongodb+srv://myApp:myPass@cluster2-rs0.pitr-30181.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.Vf7EYUT38a ++ mktemp + local LAST_ERR=/tmp/tmp.Hj4iUI4up6 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-d5dfh -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100501 })\n'\'' | mongo mongodb+srv://myApp:myPass@cluster2-rs0.pitr-30181.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Vf7EYUT38a Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://cluster2-rs0-0.cluster2-rs0.pitr-30181.svc.cluster.local:27017,cluster2-rs0-1.cluster2-rs0.pitr-30181.svc.cluster.local:27017,cluster2-rs0-2.cluster2-rs0.pitr-30181.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("e3fcd189-c317-4de3-baff-802cb26a8463") } Percona Server for MongoDB server version: v8.0.12-4 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.Hj4iUI4up6 + rm /tmp/tmp.Vf7EYUT38a /tmp/tmp.Hj4iUI4up6 + return 0 + '[' -n '' ']' + desc 'waiting for chunks to be uploaded' + set +o xtrace ----------------------------------------------------------------------------------- waiting for chunks to be uploaded ----------------------------------------------------------------------------------- + sleep 120 + desc 'check restore by date' + set +o xtrace ----------------------------------------------------------------------------------- check restore by date ----------------------------------------------------------------------------------- + '[' -z '' ']' + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2104/e2e-tests/pitr/conf/restore.yml + /usr/sbin/sed -e 's/name:/name: restore-backup-minio2-0/' + /usr/sbin/sed -e 's/backupName:/backupName: backup-minio2-0/' + /usr/sbin/sed -e 's/clusterName:/clusterName: cluster2/' + /usr/sbin/sed -e 's/pitrType:/type: date/' + '[' -z '2025-11-04 22:43:30' ']' + /usr/sbin/sed -e 's/date:/date: 2025-11-04 22:43:30/' + /usr/sbin/sed -e /backupSource/,+2d + '[' -n '' ']' + yq + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.KA47jWf77t ++ mktemp + local LAST_ERR=/tmp/tmp.X6ibIiCdow + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.KA47jWf77t perconaservermongodbrestore.psmdb.percona.com/restore-backup-minio2-0 created + cat /tmp/tmp.X6ibIiCdow + rm /tmp/tmp.KA47jWf77t /tmp/tmp.X6ibIiCdow + return 0 + wait_restore backup-minio2-0 cluster2 + local backup_name=backup-minio2-0 + local cluster_name=cluster2 + local target_state=ready + local wait_cluster_consistency=1 + local wait_time=1780 + local ok_if_ready=0 + set +o xtrace Waiting for the psmdb-restore/restore-backup-minio2-0 object to be created.OK Waiting psmdb-restore/restore-backup-minio2-0 to reach state "ready" .OK after 0 minutes + [[ 1 -eq 1 ]] + wait_cluster_consistency cluster2 + local cluster_name=cluster2 + local wait_time=32 + retry=0 + sleep 7 + echo -n 'waiting for 
cluster readyness' waiting for cluster readyness++ kubectl_bin get psmdb cluster2 -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.AMj5J0Hq7Y +++ mktemp ++ local LAST_ERR=/tmp/tmp.ROtHdUI309 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb cluster2 -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.AMj5J0Hq7Y ++ cat /tmp/tmp.ROtHdUI309 ++ rm /tmp/tmp.AMj5J0Hq7Y /tmp/tmp.ROtHdUI309 ++ return 0 + [[ ready == \r\e\a\d\y ]] + echo .OK .OK + echo + set -o xtrace + '[' -n '' ']' + compare_mongo_cmd find myApp:myPass@cluster2-rs0-0.cluster2-rs0.pitr-30181 -4th + local command=find + local uri=myApp:myPass@cluster2-rs0-0.cluster2-rs0.pitr-30181 + local postfix=-4th + local suffix= + local database=myApp + local collection=test + local sort= + local tls=false + local 'full_command=db.test.find()' + [[ -n '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-11-04T22:47:14+0000] running db.test.find() in myApp + [[ false == \t\r\u\e ]] + mongo_command=run_mongo + run_mongo 'use myApp\n db.test.find()' myApp:myPass@cluster2-rs0-0.cluster2-rs0.pitr-30181 mongodb '' + local 'command=use myApp\n db.test.find()' + grep -E -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' + local uri=myApp:myPass@cluster2-rs0-0.cluster2-rs0.pitr-30181 + local driver=mongodb + local suffix=.svc.cluster.local + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.K2Dq7ZJUqx +++ mktemp ++ local LAST_ERR=/tmp/tmp.4dYupelEjs ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.K2Dq7ZJUqx ++ cat /tmp/tmp.4dYupelEjs ++ rm /tmp/tmp.K2Dq7ZJUqx /tmp/tmp.4dYupelEjs ++ return 0 + local client_container=psmdb-client-696897d69b-d5dfh + local mongo_flag= + [[ myApp:myPass@cluster2-rs0-0.cluster2-rs0.pitr-30181 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-696897d69b-d5dfh -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@cluster2-rs0-0.cluster2-rs0.pitr-30181.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.hXy8xJAC3U ++ mktemp + local LAST_ERR=/tmp/tmp.mfWPcz6b14 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-d5dfh -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@cluster2-rs0-0.cluster2-rs0.pitr-30181.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.hXy8xJAC3U + cat /tmp/tmp.mfWPcz6b14 + rm /tmp/tmp.hXy8xJAC3U /tmp/tmp.mfWPcz6b14 + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2104/e2e-tests/pitr/compare/find-4th.json /tmp/tmp.wGkpxEBpK1/find-4th + compare_mongo_cmd find myApp:myPass@cluster2-rs0-1.cluster2-rs0.pitr-30181 -4th + local command=find + local uri=myApp:myPass@cluster2-rs0-1.cluster2-rs0.pitr-30181 + local postfix=-4th + local 
suffix= + local database=myApp + local collection=test + local sort= + local tls=false + local 'full_command=db.test.find()' + [[ -n '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-11-04T22:47:16+0000] running db.test.find() in myApp + [[ false == \t\r\u\e ]] + mongo_command=run_mongo + run_mongo 'use myApp\n db.test.find()' myApp:myPass@cluster2-rs0-1.cluster2-rs0.pitr-30181 mongodb '' + grep -E -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@cluster2-rs0-1.cluster2-rs0.pitr-30181 + local driver=mongodb + local suffix=.svc.cluster.local + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ukpnlK8Nh9 +++ mktemp ++ local LAST_ERR=/tmp/tmp.Rz38U1aKKc ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.ukpnlK8Nh9 ++ cat /tmp/tmp.Rz38U1aKKc ++ rm /tmp/tmp.ukpnlK8Nh9 /tmp/tmp.Rz38U1aKKc ++ return 0 + local client_container=psmdb-client-696897d69b-d5dfh + local mongo_flag= + [[ myApp:myPass@cluster2-rs0-1.cluster2-rs0.pitr-30181 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-696897d69b-d5dfh -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@cluster2-rs0-1.cluster2-rs0.pitr-30181.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.2yXL8hZ1Rx ++ mktemp + local LAST_ERR=/tmp/tmp.sAdBV8OMYO + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-d5dfh -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@cluster2-rs0-1.cluster2-rs0.pitr-30181.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.2yXL8hZ1Rx + cat /tmp/tmp.sAdBV8OMYO + rm /tmp/tmp.2yXL8hZ1Rx /tmp/tmp.sAdBV8OMYO + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2104/e2e-tests/pitr/compare/find-4th.json /tmp/tmp.wGkpxEBpK1/find-4th + compare_mongo_cmd find myApp:myPass@cluster2-rs0-2.cluster2-rs0.pitr-30181 -4th + local command=find + local uri=myApp:myPass@cluster2-rs0-2.cluster2-rs0.pitr-30181 + local postfix=-4th + local suffix= + local database=myApp + local collection=test + local sort= + local tls=false + local 'full_command=db.test.find()' + [[ -n '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-11-04T22:47:19+0000] running db.test.find() in myApp + [[ false == \t\r\u\e ]] + mongo_command=run_mongo + run_mongo 'use myApp\n db.test.find()' myApp:myPass@cluster2-rs0-2.cluster2-rs0.pitr-30181 mongodb '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@cluster2-rs0-2.cluster2-rs0.pitr-30181 + local driver=mongodb + local suffix=.svc.cluster.local + grep -E -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for 
the timer service' + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.HNkx4M9DD5 +++ mktemp ++ local LAST_ERR=/tmp/tmp.Xl6YNKnRoW ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.HNkx4M9DD5 ++ cat /tmp/tmp.Xl6YNKnRoW ++ rm /tmp/tmp.HNkx4M9DD5 /tmp/tmp.Xl6YNKnRoW ++ return 0 + local client_container=psmdb-client-696897d69b-d5dfh + local mongo_flag= + [[ myApp:myPass@cluster2-rs0-2.cluster2-rs0.pitr-30181 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-696897d69b-d5dfh -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@cluster2-rs0-2.cluster2-rs0.pitr-30181.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.P5hhQjrfOX ++ mktemp + local LAST_ERR=/tmp/tmp.AS3qeY00LM + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-d5dfh -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@cluster2-rs0-2.cluster2-rs0.pitr-30181.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.P5hhQjrfOX + cat /tmp/tmp.AS3qeY00LM + rm /tmp/tmp.P5hhQjrfOX /tmp/tmp.AS3qeY00LM + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2104/e2e-tests/pitr/compare/find-4th.json /tmp/tmp.wGkpxEBpK1/find-4th + kubectl patch psmdb some-name --type=merge --patch '{"spec": {"backup": {"pitr": {"enabled": false}}}}' perconaservermongodb.psmdb.percona.com/some-name patched + kubectl patch psmdb cluster2 --type=merge --patch '{"spec": {"backup": {"pitr": {"enabled": false}}}}' perconaservermongodb.psmdb.percona.com/cluster2 patched + sleep 20 + kubectl_bin delete psmdb-backup --all ++ mktemp + local LAST_OUT=/tmp/tmp.6NqckVLkZo ++ mktemp + local LAST_ERR=/tmp/tmp.ZRhZ7rvu56 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete psmdb-backup --all + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.6NqckVLkZo perconaservermongodbbackup.psmdb.percona.com "backup-minio-0" deleted from pitr-30181 namespace perconaservermongodbbackup.psmdb.percona.com "backup-minio-1" deleted from pitr-30181 namespace perconaservermongodbbackup.psmdb.percona.com "backup-minio-2" deleted from pitr-30181 namespace perconaservermongodbbackup.psmdb.percona.com "backup-minio-3" deleted from pitr-30181 namespace perconaservermongodbbackup.psmdb.percona.com "backup-minio2-0" deleted from pitr-30181 namespace + cat /tmp/tmp.ZRhZ7rvu56 + rm /tmp/tmp.6NqckVLkZo /tmp/tmp.ZRhZ7rvu56 + return 0 + destroy pitr-30181 + local namespace=pitr-30181 + local ignore_logs=true + [[ 0 == 1 ]] + desc 'destroy cluster/operator and all other resources' + set +o xtrace ----------------------------------------------------------------------------------- destroy cluster/operator and all other resources ----------------------------------------------------------------------------------- + '[' true == false ']' + delete_backups + desc 'Delete psmdb-backup' + set +o xtrace ----------------------------------------------------------------------------------- Delete psmdb-backup 
----------------------------------------------------------------------------------- ++ kubectl_bin get psmdb-backup --no-headers ++ wc -l +++ mktemp ++ local LAST_OUT=/tmp/tmp.ghgoWGroWp +++ mktemp ++ local LAST_ERR=/tmp/tmp.ZcQxWBAgxz ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb-backup --no-headers ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.ghgoWGroWp ++ cat /tmp/tmp.ZcQxWBAgxz No resources found in pitr-30181 namespace. ++ rm /tmp/tmp.ghgoWGroWp /tmp/tmp.ZcQxWBAgxz ++ return 0 + '[' 0 '!=' 0 ']' + delete_crd + desc 'get and delete old CRDs and RBAC' + set +o xtrace ----------------------------------------------------------------------------------- get and delete old CRDs and RBAC ----------------------------------------------------------------------------------- + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2104/deploy/crd.yaml --ignore-not-found --wait=false ++ mktemp + local LAST_OUT=/tmp/tmp.8U4gbFihsc ++ mktemp + local LAST_ERR=/tmp/tmp.AXO2StlkiF + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2104/deploy/crd.yaml --ignore-not-found --wait=false + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.8U4gbFihsc customresourcedefinition.apiextensions.k8s.io "perconaservermongodbbackups.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbrestores.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbs.psmdb.percona.com" deleted + cat /tmp/tmp.AXO2StlkiF + rm /tmp/tmp.8U4gbFihsc /tmp/tmp.AXO2StlkiF + return 0 ++ yq eval .metadata.name /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2104/deploy/crd.yaml ++ grep -v '\-\-\-' grep: warning: stray \ before - grep: warning: stray \ before - + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + kubectl get perconaservermongodbbackups.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbbackups.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbbackups" + kubectl patch perconaservermongodbbackups.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbbackups" + : + kubectl_bin wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.e2Ch8EIiGb ++ mktemp + local LAST_ERR=/tmp/tmp.ttDobIW7vO + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.e2Ch8EIiGb + cat /tmp/tmp.ttDobIW7vO + rm /tmp/tmp.e2Ch8EIiGb /tmp/tmp.ttDobIW7vO + return 0 + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + kubectl get perconaservermongodbrestores.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbrestores.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbrestores" + kubectl patch perconaservermongodbrestores.psmdb.percona.com -n 
sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbrestores" + : + kubectl_bin wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.UmGwcwe2zX ++ mktemp + local LAST_ERR=/tmp/tmp.mRSByVOURL + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.UmGwcwe2zX + cat /tmp/tmp.mRSByVOURL + rm /tmp/tmp.UmGwcwe2zX /tmp/tmp.mRSByVOURL + return 0 + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + kubectl get perconaservermongodbs.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbs.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbs" + kubectl patch perconaservermongodbs.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbs" + : + kubectl_bin wait --for=delete crd perconaservermongodbs.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.le6A2Fpfab ++ mktemp + local LAST_ERR=/tmp/tmp.vsyYM85Nuk + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbs.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.le6A2Fpfab + cat /tmp/tmp.vsyYM85Nuk + rm /tmp/tmp.le6A2Fpfab /tmp/tmp.vsyYM85Nuk + return 0 + local rbac_yaml=rbac.yaml + '[' -n psmdb-operator ']' + rbac_yaml=cw-rbac.yaml + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2104/deploy/cw-rbac.yaml --ignore-not-found ++ mktemp + local LAST_OUT=/tmp/tmp.R9ICxwOIqv ++ mktemp + local LAST_ERR=/tmp/tmp.HUyZq5EWJm + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2104/deploy/cw-rbac.yaml --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.R9ICxwOIqv clusterrole.rbac.authorization.k8s.io "percona-server-mongodb-operator" deleted clusterrolebinding.rbac.authorization.k8s.io "service-account-percona-server-mongodb-operator" deleted + cat /tmp/tmp.HUyZq5EWJm + rm /tmp/tmp.R9ICxwOIqv /tmp/tmp.HUyZq5EWJm + return 0 + destroy_cert_manager + kubectl_bin delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.aEIzi1CdYx ++ mktemp + local LAST_ERR=/tmp/tmp.Z2Ft7QPX3A + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.aEIzi1CdYx + cat /tmp/tmp.Z2Ft7QPX3A Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": namespaces "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "challenges.acme.cert-manager.io" not found Error from server (NotFound): error when deleting 
"https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "orders.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificaterequests.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificates.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "clusterissuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "issuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": serviceaccounts "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": serviceaccounts "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": serviceaccounts "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cluster-view" not found Error from server (NotFound): error when deleting 
"https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-edit" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting 
"https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": services "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": services "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": services "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": deployments.apps "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": deployments.apps "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": deployments.apps "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": mutatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": validatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found + sleep 0 + for i in $(seq 0 2) + set +e + kubectl delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.aEIzi1CdYx + cat 
/tmp/tmp.Z2Ft7QPX3A Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": namespaces "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "challenges.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "orders.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificaterequests.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificates.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "clusterissuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "issuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": serviceaccounts "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": serviceaccounts "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": serviceaccounts "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when 
deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cluster-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-edit" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting 
"https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": services "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": services "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": services "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": deployments.apps "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": deployments.apps "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": deployments.apps "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": mutatingwebhookconfigurations.admissionregistration.k8s.io 
"cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": validatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found + sleep 4 + for i in $(seq 0 2) + set +e + kubectl delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.aEIzi1CdYx + cat /tmp/tmp.Z2Ft7QPX3A Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": namespaces "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "challenges.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "orders.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificaterequests.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificates.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "clusterissuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "issuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": serviceaccounts "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": serviceaccounts "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": serviceaccounts "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificates" not 
"https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": deployments.apps "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": deployments.apps "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": mutatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": validatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found + sleep 8 + cat /tmp/tmp.aEIzi1CdYx + cat /tmp/tmp.Z2Ft7QPX3A Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": namespaces "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "challenges.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "orders.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificaterequests.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificates.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "clusterissuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "issuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": serviceaccounts "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": serviceaccounts "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": serviceaccounts "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting 
"https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cluster-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-edit" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": 
clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": services "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": services "cert-manager" not found Error from server (NotFound): error when deleting 
"https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": services "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": deployments.apps "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": deployments.apps "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": deployments.apps "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": mutatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": validatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found + rm /tmp/tmp.aEIzi1CdYx /tmp/tmp.Z2Ft7QPX3A + return 1 + true + '[' -n '' ']' + '[' -n psmdb-operator ']' + kubectl_bin delete --grace-period=0 --force=true namespace pitr-30181 + rm -rf /tmp/tmp.wGkpxEBpK1 + kubectl_bin delete --grace-period=0 --force=true namespace psmdb-operator ++ mktemp ++ mktemp + desc 'test passed' + local LAST_OUT=/tmp/tmp.uqD1pUmpX9 + set +o xtrace ----------------------------------------------------------------------------------- test passed ----------------------------------------------------------------------------------- ++ mktemp + local LAST_OUT=/tmp/tmp.xj0npYluuM ++ mktemp + local LAST_ERR=/tmp/tmp.j66rmdeAZX + local exit_status=0 + local timeout=4 ++ seq 0 2 + local LAST_ERR=/tmp/tmp.q1Tek6zDFb + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete --grace-period=0 --force=true namespace pitr-30181 + for i in $(seq 0 2) + set +e + kubectl delete --grace-period=0 --force=true namespace psmdb-operator