Log: /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1938/e2e-tests/logs/demand-backup-incremental.log WARNING: version difference between client (1.33) and server (1.30) exceeds the supported minor version skew of +/-1 WARNING: version difference between client (1.33) and server (1.30) exceeds the supported minor version skew of +/-1 WARNING: version difference between client (1.33) and server (1.30) exceeds the supported minor version skew of +/-1 + create_infra demand-backup-incremental-21272 + local ns=demand-backup-incremental-21272 + [[ 1 == 1 ]] + delete_crd + desc 'get and delete old CRDs and RBAC' + set +o xtrace ----------------------------------------------------------------------------------- get and delete old CRDs and RBAC ----------------------------------------------------------------------------------- + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1938/deploy/crd.yaml --ignore-not-found --wait=false ++ mktemp + local LAST_OUT=/tmp/tmp.IYoKcrbBcq ++ mktemp + local LAST_ERR=/tmp/tmp.Fx07QLLIEs + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1938/deploy/crd.yaml --ignore-not-found --wait=false + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.IYoKcrbBcq customresourcedefinition.apiextensions.k8s.io "perconaservermongodbbackups.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbrestores.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbs.psmdb.percona.com" deleted + cat /tmp/tmp.Fx07QLLIEs + rm /tmp/tmp.IYoKcrbBcq /tmp/tmp.Fx07QLLIEs + return 0 ++ yq eval .metadata.name /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1938/deploy/crd.yaml ++ grep -v '\-\-\-' + for crd_name in '$(yq eval '\''.metadata.name'\'' "${src_dir}/deploy/crd.yaml" | grep -v '\''\-\-\-'\'')' + kubectl get perconaservermongodbbackups.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbbackups.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' + kubectl patch perconaservermongodbbackups.psmdb.percona.com -n demand-backup-incremental-2440 backup-azure-blob --type=merge -p '{"metadata":{"finalizers":[]}}' perconaservermongodbbackup.psmdb.percona.com/backup-azure-blob patched + kubectl patch perconaservermongodbbackups.psmdb.percona.com -n demand-backup-incremental-2440 backup-gcp-cs --type=merge -p '{"metadata":{"finalizers":[]}}' Error from server (NotFound): perconaservermongodbbackups.psmdb.percona.com "backup-gcp-cs" not found + kubectl patch perconaservermongodbbackups.psmdb.percona.com -n demand-backup-incremental-2440 backup-minio-not-base --type=merge -p '{"metadata":{"finalizers":[]}}' perconaservermongodbbackup.psmdb.percona.com/backup-minio-not-base patched + : + kubectl_bin wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.htpudbQ5Q8 ++ mktemp + local LAST_ERR=/tmp/tmp.yPviJssx3U + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.htpudbQ5Q8 customresourcedefinition.apiextensions.k8s.io/perconaservermongodbbackups.psmdb.percona.com condition met + cat /tmp/tmp.yPviJssx3U + rm /tmp/tmp.htpudbQ5Q8 /tmp/tmp.yPviJssx3U + return 0 + for 
crd_name in '$(yq eval '\''.metadata.name'\'' "${src_dir}/deploy/crd.yaml" | grep -v '\''\-\-\-'\'')' + kubectl get perconaservermongodbrestores.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbrestores.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbrestores" + kubectl patch perconaservermongodbrestores.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbrestores" + : + kubectl_bin wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.r63Na3ZYim ++ mktemp + local LAST_ERR=/tmp/tmp.6NiWkN3wSB + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.r63Na3ZYim + cat /tmp/tmp.6NiWkN3wSB + rm /tmp/tmp.r63Na3ZYim /tmp/tmp.6NiWkN3wSB + return 0 + for crd_name in '$(yq eval '\''.metadata.name'\'' "${src_dir}/deploy/crd.yaml" | grep -v '\''\-\-\-'\'')' + grep -v NAMESPACE + kubectl get perconaservermongodbs.psmdb.percona.com --all-namespaces -o wide + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbs.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbs" + kubectl patch perconaservermongodbs.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbs" + : + kubectl_bin wait --for=delete crd perconaservermongodbs.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.UjxbWdoV2e ++ mktemp + local LAST_ERR=/tmp/tmp.JTnEnB4P1M + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=delete crd perconaservermongodbs.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.UjxbWdoV2e + cat /tmp/tmp.JTnEnB4P1M + rm /tmp/tmp.UjxbWdoV2e /tmp/tmp.JTnEnB4P1M + return 0 + local rbac_yaml=rbac.yaml + '[' -n psmdb-operator ']' + rbac_yaml=cw-rbac.yaml + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1938/deploy/cw-rbac.yaml --ignore-not-found ++ mktemp + local LAST_OUT=/tmp/tmp.BKCJFH0Xhx ++ mktemp + local LAST_ERR=/tmp/tmp.l1oicenr2r + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1938/deploy/cw-rbac.yaml --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.BKCJFH0Xhx clusterrole.rbac.authorization.k8s.io "percona-server-mongodb-operator" deleted clusterrolebinding.rbac.authorization.k8s.io "service-account-percona-server-mongodb-operator" deleted + cat /tmp/tmp.l1oicenr2r + rm /tmp/tmp.BKCJFH0Xhx /tmp/tmp.l1oicenr2r + return 0 + check_crd_for_deletion PR-1938-23826c20 + local git_tag=PR-1938-23826c20 ++ /usr/bin/sed ':a;N;$!ba;s/\n/ /g' ++ yq eval .metadata.name ++ /usr/bin/sed s/---//g ++ curl -s https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/PR-1938-23826c20/deploy/crd.yaml + for crd_name in '$(curl -s https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/${git_tag}/deploy/crd.yaml | yq eval '\''.metadata.name'\'' | $sed '\''s/---//g'\'' | $sed 
'\'':a;N;$!ba;s/\n/ /g'\'')' ++ kubectl_bin get crd/null -o 'jsonpath={.status.conditions[-1].type}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.SVC3P7ZHc4 +++ mktemp ++ local LAST_ERR=/tmp/tmp.APRwg9tpA6 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}' ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.SVC3P7ZHc4 ++ cat /tmp/tmp.APRwg9tpA6 Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ sleep 0 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}' ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.SVC3P7ZHc4 ++ cat /tmp/tmp.APRwg9tpA6 Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ sleep 4 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}' ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.SVC3P7ZHc4 ++ cat /tmp/tmp.APRwg9tpA6 Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ sleep 8 ++ cat /tmp/tmp.SVC3P7ZHc4 ++ cat /tmp/tmp.APRwg9tpA6 Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ rm /tmp/tmp.SVC3P7ZHc4 /tmp/tmp.APRwg9tpA6 ++ return 1 + [[ '' == \T\e\r\m\i\n\a\t\i\n\g ]] + '[' -n psmdb-operator ']' + create_namespace psmdb-operator + local namespace=psmdb-operator + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + destroy_chaos_mesh ++ helm list --all-namespaces --filter chaos-mesh ++ tail -n1 ++ awk '-F ' '{print $2}' ++ sed s/NAMESPACE// + local chaos_mesh_ns= + desc 'destroy chaos-mesh' + set +o xtrace ----------------------------------------------------------------------------------- destroy chaos-mesh ----------------------------------------------------------------------------------- + '[' -n '' ']' ++ awk '{print $1}' ++ grep chaos-mesh ++ kubectl get MutatingWebhookConfiguration + timeout 30 kubectl delete MutatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep validate-auth ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl api-resources ++ grep chaos-mesh ++ awk '{print $1}' ++ kubectl get crd ++ awk '{print $1}' ++ grep chaos-mesh.org + timeout 30 kubectl delete crd error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrolebinding ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrolebinding error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrole ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrole error: resource(s) were provided, but no name was specified + : + desc 'cleaned up all old namespaces' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up all old namespaces ----------------------------------------------------------------------------------- + '[' -n '' ']' + desc 'cleaned up old namespaces psmdb-operator' + set +o xtrace 
----------------------------------------------------------------------------------- cleaned up old namespaces psmdb-operator ----------------------------------------------------------------------------------- + kubectl_bin delete namespace psmdb-operator --ignore-not-found ++ mktemp + xargs kubectl delete ns + local LAST_OUT=/tmp/tmp.Hrc2ggARfA ++ mktemp + awk '{print$1}' + local LAST_ERR=/tmp/tmp.H8ZRuzrVsD + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete namespace psmdb-operator --ignore-not-found + kubectl_bin get ns + egrep -v '^kube-|^default|Terminating|psmdb-operator|openshift|gke-mcs|^NAME' ++ mktemp + local LAST_OUT=/tmp/tmp.GovfkDQNB8 ++ mktemp + local LAST_ERR=/tmp/tmp.9zKiHqhSkV + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get ns + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.GovfkDQNB8 + cat /tmp/tmp.9zKiHqhSkV + rm /tmp/tmp.GovfkDQNB8 /tmp/tmp.9zKiHqhSkV + return 0 namespace "demand-backup-incremental-2440" deleted namespace "gke-managed-cim" deleted namespace "gke-managed-system" deleted namespace "gmp-public" deleted namespace "gmp-system" deleted + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Hrc2ggARfA namespace "psmdb-operator" deleted + cat /tmp/tmp.H8ZRuzrVsD + rm /tmp/tmp.Hrc2ggARfA /tmp/tmp.H8ZRuzrVsD + return 0 + kubectl_bin wait --for=delete namespace psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.nEeJ4s3eMu ++ mktemp + local LAST_ERR=/tmp/tmp.VOS2qSf7IX + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=delete namespace psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.nEeJ4s3eMu + cat /tmp/tmp.VOS2qSf7IX + rm /tmp/tmp.nEeJ4s3eMu /tmp/tmp.VOS2qSf7IX + return 0 + desc 'create namespace psmdb-operator' + set +o xtrace ----------------------------------------------------------------------------------- create namespace psmdb-operator ----------------------------------------------------------------------------------- + kubectl_bin create namespace psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.WjE1XQCcQt ++ mktemp + local LAST_ERR=/tmp/tmp.Ip6G9oeAMy + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl create namespace psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.WjE1XQCcQt namespace/psmdb-operator created + cat /tmp/tmp.Ip6G9oeAMy + rm /tmp/tmp.WjE1XQCcQt /tmp/tmp.Ip6G9oeAMy + return 0 ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.VE1MKRfuz6 +++ mktemp ++ local LAST_ERR=/tmp/tmp.vYYGzXp6JP ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.VE1MKRfuz6 ++ cat /tmp/tmp.vYYGzXp6JP ++ rm /tmp/tmp.VE1MKRfuz6 /tmp/tmp.vYYGzXp6JP ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-1938-23826c20-3-cluster4 --namespace=psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.fKKy1CI7nK ++ mktemp + local LAST_ERR=/tmp/tmp.ie6BMwZM5w + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-1938-23826c20-3-cluster4 --namespace=psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.fKKy1CI7nK Context 
"gke_cloud-dev-112233_us-central1-a_jen-psmdb-1938-23826c20-3-cluster4" modified. + cat /tmp/tmp.ie6BMwZM5w + rm /tmp/tmp.fKKy1CI7nK /tmp/tmp.ie6BMwZM5w + return 0 + deploy_operator + desc 'start PSMDB operator' + set +o xtrace ----------------------------------------------------------------------------------- start PSMDB operator ----------------------------------------------------------------------------------- + local cr_file + '[' -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1938/e2e-tests/demand-backup-incremental/conf/crd.yaml ']' + cr_file=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1938/deploy/crd.yaml + kubectl_bin apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1938/deploy/crd.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.gO6Vw63GJm ++ mktemp + local LAST_ERR=/tmp/tmp.zLYFATl5do + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1938/deploy/crd.yaml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.gO6Vw63GJm customresourcedefinition.apiextensions.k8s.io/perconaservermongodbbackups.psmdb.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaservermongodbrestores.psmdb.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaservermongodbs.psmdb.percona.com serverside-applied + cat /tmp/tmp.zLYFATl5do + rm /tmp/tmp.gO6Vw63GJm /tmp/tmp.zLYFATl5do + return 0 + '[' -n psmdb-operator ']' + apply_rbac cw-rbac + local operator_namespace=psmdb-operator + local rbac=cw-rbac + sed -e 's^namespace: .*^namespace: psmdb-operator^' + kubectl_bin apply -n psmdb-operator -f - ++ mktemp + local LAST_OUT=/tmp/tmp.S65RJN3WFf ++ mktemp + local LAST_ERR=/tmp/tmp.W5wWw7WKVu + local exit_status=0 + local timeout=4 + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1938/deploy/cw-rbac.yaml ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -n psmdb-operator -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.S65RJN3WFf clusterrole.rbac.authorization.k8s.io/percona-server-mongodb-operator created serviceaccount/percona-server-mongodb-operator created clusterrolebinding.rbac.authorization.k8s.io/service-account-percona-server-mongodb-operator created + cat /tmp/tmp.W5wWw7WKVu + rm /tmp/tmp.S65RJN3WFf /tmp/tmp.W5wWw7WKVu + return 0 + yq eval ' (.spec.template.spec.containers[].image = "perconalab/percona-server-mongodb-operator:PR-1938-23826c20") | ((.. | select(.[] == "DISABLE_TELEMETRY")) |= .value="true") | ((.. 
| select(.[] == "LOG_LEVEL")) |= .value="DEBUG")' /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1938/deploy/cw-operator.yaml + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.Dfg3PNoGZN ++ mktemp + local LAST_ERR=/tmp/tmp.FLIb3sTjMC + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Dfg3PNoGZN deployment.apps/percona-server-mongodb-operator created + cat /tmp/tmp.FLIb3sTjMC + rm /tmp/tmp.Dfg3PNoGZN /tmp/tmp.FLIb3sTjMC + return 0 + sleep 2 ++ get_operator_pod ++ kubectl_bin get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.HOfc9JiNVe +++ mktemp ++ local LAST_ERR=/tmp/tmp.kW63lKvNNA ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.HOfc9JiNVe ++ cat /tmp/tmp.kW63lKvNNA ++ rm /tmp/tmp.HOfc9JiNVe /tmp/tmp.kW63lKvNNA ++ return 0 + wait_pod percona-server-mongodb-operator-688cb5cf6c-m68bt + local pod=percona-server-mongodb-operator-688cb5cf6c-m68bt + set +o xtrace waiting for pod/percona-server-mongodb-operator-688cb5cf6c-m68bt to be ready.OK + echo 'Print operator info from log' Print operator info from log + grep 'Manager starting up' ++ get_operator_pod ++ kubectl_bin get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.s69KTuwtze +++ mktemp ++ local LAST_ERR=/tmp/tmp.EQ2qN5Tdmf ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.s69KTuwtze ++ cat /tmp/tmp.EQ2qN5Tdmf ++ rm /tmp/tmp.s69KTuwtze /tmp/tmp.EQ2qN5Tdmf ++ return 0 + kubectl_bin logs percona-server-mongodb-operator-688cb5cf6c-m68bt ++ mktemp + local LAST_OUT=/tmp/tmp.g1xou1uZlw ++ mktemp + local LAST_ERR=/tmp/tmp.3uLS2Wp5CZ + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl logs percona-server-mongodb-operator-688cb5cf6c-m68bt + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.g1xou1uZlw + cat /tmp/tmp.3uLS2Wp5CZ + rm /tmp/tmp.g1xou1uZlw /tmp/tmp.3uLS2Wp5CZ + return 0 2025-05-21T23:06:06.524Z INFO setup Manager starting up {"gitCommit": "23826c2013e23b0dc83348b69100d3c7104a88f2", "gitBranch": "PR-1938-23826c20", "buildTime": "", "goVersion": "go1.24.3", "os": "linux", "arch": "amd64"} + create_namespace demand-backup-incremental-21272 + local namespace=demand-backup-incremental-21272 + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + destroy_chaos_mesh ++ helm list --all-namespaces --filter chaos-mesh ++ tail -n1 ++ awk '-F ' '{print $2}' ++ sed s/NAMESPACE// + local chaos_mesh_ns= + desc 'destroy chaos-mesh' + set +o xtrace ----------------------------------------------------------------------------------- destroy chaos-mesh ----------------------------------------------------------------------------------- + '[' -n '' ']' ++ kubectl get MutatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete 
MutatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep validate-auth ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ awk '{print $1}' ++ grep chaos-mesh ++ kubectl api-resources ++ kubectl get crd ++ grep chaos-mesh.org ++ awk '{print $1}' + timeout 30 kubectl delete crd error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrolebinding ++ awk '{print $1}' ++ grep chaos-mesh + timeout 30 kubectl delete clusterrolebinding error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrole ++ awk '{print $1}' ++ grep chaos-mesh + timeout 30 kubectl delete clusterrole error: resource(s) were provided, but no name was specified + : + desc 'cleaned up all old namespaces' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up all old namespaces ----------------------------------------------------------------------------------- + kubectl_bin get ns + awk '{print$1}' + '[' -n '' ']' ++ mktemp + xargs kubectl delete ns + desc 'cleaned up old namespaces demand-backup-incremental-21272' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up old namespaces demand-backup-incremental-21272 ----------------------------------------------------------------------------------- + kubectl_bin delete namespace demand-backup-incremental-21272 --ignore-not-found ++ mktemp + egrep -v '^kube-|^default|Terminating|psmdb-operator|openshift|gke-mcs|^NAME' + local LAST_OUT=/tmp/tmp.wD3O3dUcH0 + local LAST_OUT=/tmp/tmp.67bcVnFbMs ++ mktemp ++ mktemp + local LAST_ERR=/tmp/tmp.mPILJpxjGK + local exit_status=0 + local timeout=4 + local LAST_ERR=/tmp/tmp.pJ6tqoZrTS + local exit_status=0 + local timeout=4 ++ seq 0 2 ++ seq 0 2 + for i in '$(seq 0 2)' + for i in '$(seq 0 2)' + set +e + set +e + kubectl get ns + kubectl delete namespace demand-backup-incremental-21272 --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.wD3O3dUcH0 + cat /tmp/tmp.mPILJpxjGK + rm /tmp/tmp.wD3O3dUcH0 /tmp/tmp.mPILJpxjGK + return 0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.67bcVnFbMs + cat /tmp/tmp.pJ6tqoZrTS + rm /tmp/tmp.67bcVnFbMs /tmp/tmp.pJ6tqoZrTS + return 0 + kubectl_bin wait --for=delete namespace demand-backup-incremental-21272 ++ mktemp + local LAST_OUT=/tmp/tmp.SV0HQmU1Go ++ mktemp + local LAST_ERR=/tmp/tmp.QdhcMpgexX + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=delete namespace demand-backup-incremental-21272 namespace "gke-managed-cim" deleted + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.SV0HQmU1Go + cat /tmp/tmp.QdhcMpgexX + rm /tmp/tmp.SV0HQmU1Go /tmp/tmp.QdhcMpgexX + return 0 + desc 'create namespace demand-backup-incremental-21272' + set +o xtrace ----------------------------------------------------------------------------------- create namespace demand-backup-incremental-21272 ----------------------------------------------------------------------------------- + kubectl_bin create namespace 
demand-backup-incremental-21272 ++ mktemp + local LAST_OUT=/tmp/tmp.T7LLrWE7q2 ++ mktemp + local LAST_ERR=/tmp/tmp.E4ILS3ws8g + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl create namespace demand-backup-incremental-21272 namespace "gke-managed-system" deleted namespace "gmp-public" deleted + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.T7LLrWE7q2 namespace/demand-backup-incremental-21272 created + cat /tmp/tmp.E4ILS3ws8g + rm /tmp/tmp.T7LLrWE7q2 /tmp/tmp.E4ILS3ws8g + return 0 ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.Hj3WnzgKU9 +++ mktemp ++ local LAST_ERR=/tmp/tmp.XypNk8UoQ3 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Hj3WnzgKU9 ++ cat /tmp/tmp.XypNk8UoQ3 ++ rm /tmp/tmp.Hj3WnzgKU9 /tmp/tmp.XypNk8UoQ3 ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-1938-23826c20-3-cluster4 --namespace=demand-backup-incremental-21272 ++ mktemp + local LAST_OUT=/tmp/tmp.c507xYS0gO ++ mktemp + local LAST_ERR=/tmp/tmp.L9P42Mtmun + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-1938-23826c20-3-cluster4 --namespace=demand-backup-incremental-21272 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.c507xYS0gO Context "gke_cloud-dev-112233_us-central1-a_jen-psmdb-1938-23826c20-3-cluster4" modified. + cat /tmp/tmp.L9P42Mtmun + rm /tmp/tmp.c507xYS0gO /tmp/tmp.L9P42Mtmun + return 0 + deploy_minio + desc 'install Minio' + set +o xtrace ----------------------------------------------------------------------------------- install Minio ----------------------------------------------------------------------------------- + helm uninstall minio-service Error: uninstall: Release not loaded: minio-service: release: not found + : + helm repo remove minio "minio" has been removed from your repositories + helm repo add minio https://charts.min.io/ "minio" has been added to your repositories + retry 10 60 helm install minio-service --version 5.4.0 --set replicas=1 --set mode=standalone --set resources.requests.memory=256Mi --set rootUser=rootuser --set rootPassword=rootpass123 --set 'users[0].accessKey=some-access-key' --set 'users[0].secretKey=some-secret-key' --set 'users[0].policy=consoleAdmin' --set service.type=ClusterIP --set configPathmc=/tmp/.minio/ --set persistence.size=2G --set securityContext.enabled=false minio/minio + local max=10 + local delay=60 + shift 2 + local n=1 + helm install minio-service --version 5.4.0 --set replicas=1 --set mode=standalone --set resources.requests.memory=256Mi --set rootUser=rootuser --set rootPassword=rootpass123 --set 'users[0].accessKey=some-access-key' --set 'users[0].secretKey=some-secret-key' --set 'users[0].policy=consoleAdmin' --set service.type=ClusterIP --set configPathmc=/tmp/.minio/ --set persistence.size=2G --set securityContext.enabled=false minio/minio NAME: minio-service LAST DEPLOYED: Wed May 21 23:06:29 2025 NAMESPACE: demand-backup-incremental-21272 STATUS: deployed REVISION: 1 TEST SUITE: None NOTES: MinIO can be accessed via port 9000 on the following DNS name from within your cluster: minio-service.demand-backup-incremental-21272.cluster.local To access MinIO from localhost, run the below commands: 1. 
export POD_NAME=$(kubectl get pods --namespace demand-backup-incremental-21272 -l "release=minio-service" -o jsonpath="{.items[0].metadata.name}") 2. kubectl port-forward $POD_NAME 9000 --namespace demand-backup-incremental-21272 Read more about port forwarding here: http://kubernetes.io/docs/user-guide/kubectl/kubectl_port-forward/ You can now access MinIO server on http://localhost:9000. Follow the below steps to connect to MinIO server with mc client: 1. Download the MinIO mc client - https://min.io/docs/minio/linux/reference/minio-mc.html#quickstart 2. export MC_HOST_minio-service-local=http://$(kubectl get secret --namespace demand-backup-incremental-21272 minio-service -o jsonpath="{.data.rootUser}" | base64 --decode):$(kubectl get secret --namespace demand-backup-incremental-21272 minio-service -o jsonpath="{.data.rootPassword}" | base64 --decode)@localhost:9000 3. mc ls minio-service-local ++ kubectl_bin get pods --selector=release=minio-service -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.jggbLgWfIK +++ mktemp ++ local LAST_ERR=/tmp/tmp.jbs1oWhWPj ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=release=minio-service -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.jggbLgWfIK ++ cat /tmp/tmp.jbs1oWhWPj ++ rm /tmp/tmp.jggbLgWfIK /tmp/tmp.jbs1oWhWPj ++ return 0 + MINIO_POD=minio-service-86dfccd949-p2dfn + wait_pod minio-service-86dfccd949-p2dfn + local pod=minio-service-86dfccd949-p2dfn + set +o xtrace waiting for pod/minio-service-86dfccd949-p2dfn to be ready.OK + '[' -n psmdb-operator ']' + kubectl_bin create svc -n psmdb-operator externalname minio-service --external-name=minio-service.demand-backup-incremental-21272.svc.cluster.local --tcp=9000 ++ mktemp + local LAST_OUT=/tmp/tmp.74WcGDulIw ++ mktemp + local LAST_ERR=/tmp/tmp.8FvS0VuJp3 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl create svc -n psmdb-operator externalname minio-service --external-name=minio-service.demand-backup-incremental-21272.svc.cluster.local --tcp=9000 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.74WcGDulIw service/minio-service created + cat /tmp/tmp.8FvS0VuJp3 + rm /tmp/tmp.74WcGDulIw /tmp/tmp.8FvS0VuJp3 + return 0 + create_minio_bucket operator-testing + local bucket=operator-testing + kubectl_bin run -i --rm aws-cli --image=perconalab/awscli --restart=Never -- bash -c 'AWS_ACCESS_KEY_ID=some-access-key AWS_SECRET_ACCESS_KEY=some-secret-key AWS_DEFAULT_REGION=us-east-1 /usr/bin/aws --endpoint-url http://minio-service:9000 s3 mb s3://operator-testing' ++ mktemp + local LAST_OUT=/tmp/tmp.IPcdWDRAiD ++ mktemp + local LAST_ERR=/tmp/tmp.ZLrvJenP2Z + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl run -i --rm aws-cli --image=perconalab/awscli --restart=Never -- bash -c 'AWS_ACCESS_KEY_ID=some-access-key AWS_SECRET_ACCESS_KEY=some-secret-key AWS_DEFAULT_REGION=us-east-1 /usr/bin/aws --endpoint-url http://minio-service:9000 s3 mb s3://operator-testing' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.IPcdWDRAiD make_bucket: operator-testing pod "aws-cli" deleted + cat /tmp/tmp.ZLrvJenP2Z If you don't see a command prompt, try pressing enter. 
warning: couldn't attach to pod/aws-cli, falling back to streaming logs: Internal error occurred: unable to upgrade connection: container aws-cli not found in pod aws-cli_demand-backup-incremental-21272 + rm /tmp/tmp.IPcdWDRAiD /tmp/tmp.ZLrvJenP2Z + return 0 + apply_s3_storage_secrets + desc 'create secrets for cloud storages' + set +o xtrace ----------------------------------------------------------------------------------- create secrets for cloud storages ----------------------------------------------------------------------------------- + '[' -z '' ']' + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1938/e2e-tests/conf/minio-secret.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1938/e2e-tests/conf/cloud-secret.yml ++ mktemp + local LAST_OUT=/tmp/tmp.qJSpsteOMA ++ mktemp + local LAST_ERR=/tmp/tmp.oWLnILecAC + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1938/e2e-tests/conf/minio-secret.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1938/e2e-tests/conf/cloud-secret.yml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.qJSpsteOMA secret/minio-secret created secret/aws-s3-secret created secret/gcp-cs-secret created secret/azure-secret created + cat /tmp/tmp.oWLnILecAC + rm /tmp/tmp.qJSpsteOMA /tmp/tmp.oWLnILecAC + return 0 + desc 'Testing on not sharded cluster' + set +o xtrace ----------------------------------------------------------------------------------- Testing on not sharded cluster ----------------------------------------------------------------------------------- + log 'Creating PSMDB cluster' + set +o xtrace [2025-05-21T23:07:07+0000] Creating PSMDB cluster + cluster=some-name + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1938/e2e-tests/demand-backup-incremental/conf/secrets.yml ++ mktemp + local LAST_OUT=/tmp/tmp.gkbiSrFWty ++ mktemp + local LAST_ERR=/tmp/tmp.U8f8Gl5lzs + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1938/e2e-tests/demand-backup-incremental/conf/secrets.yml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.gkbiSrFWty secret/some-users created + cat /tmp/tmp.U8f8Gl5lzs + rm /tmp/tmp.gkbiSrFWty /tmp/tmp.U8f8Gl5lzs + return 0 + apply_cluster /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1938/e2e-tests/demand-backup-incremental/conf/some-name.yml + '[' -z '' ']' + kubectl_bin apply -f - + cat_config /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1938/e2e-tests/demand-backup-incremental/conf/some-name.yml + yq eval '.spec.upgradeOptions.apply="Never"' + yq eval '(.spec | select(has("backup"))).backup.image = "perconalab/percona-server-mongodb-operator:main-backup"' + yq eval '(.spec | select(has("pmm"))).pmm.image = "perconalab/pmm-client:dev-latest"' + yq eval '(.spec | select(has("initImage"))).initImage = "perconalab/percona-server-mongodb-operator:PR-1938-23826c20"' + yq eval '(.spec | select(.image == null)).image = "perconalab/percona-server-mongodb-operator:main-mongod7.0"' ++ mktemp + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1938/e2e-tests/demand-backup-incremental/conf/some-name.yml + local LAST_OUT=/tmp/tmp.lkXEvTOM6d ++ mktemp + local LAST_ERR=/tmp/tmp.tu3qTEJExo + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat 
/tmp/tmp.lkXEvTOM6d perconaservermongodb.psmdb.percona.com/some-name created + cat /tmp/tmp.tu3qTEJExo + rm /tmp/tmp.lkXEvTOM6d /tmp/tmp.tu3qTEJExo + return 0 + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1938/e2e-tests/conf/client_with_tls.yml ++ mktemp + local LAST_OUT=/tmp/tmp.c4O7lmYF8y ++ mktemp + local LAST_ERR=/tmp/tmp.u5GWreL7Az + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1938/e2e-tests/conf/client_with_tls.yml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.c4O7lmYF8y deployment.apps/psmdb-client created + cat /tmp/tmp.u5GWreL7Az + rm /tmp/tmp.c4O7lmYF8y /tmp/tmp.u5GWreL7Az + return 0 + log 'check if all pods started' + set +o xtrace [2025-05-21T23:07:13+0000] check if all pods started + wait_for_running some-name-rs0 3 + local name=some-name-rs0 + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=some-name ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + [[ 0 -eq 2 ]] + wait_pod some-name-rs0-0 + local pod=some-name-rs0-0 + set +o xtrace waiting for pod/some-name-rs0-0 to be ready........OK + for i in '$(seq 0 $last_pod)' + [[ 1 -eq 2 ]] + wait_pod some-name-rs0-1 + local pod=some-name-rs0-1 + set +o xtrace waiting for pod/some-name-rs0-1 to be ready.....OK + for i in '$(seq 0 $last_pod)' + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.IODZUFGj6E +++ mktemp ++ local LAST_ERR=/tmp/tmp.Z1dhbB3ocA ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.IODZUFGj6E ++ cat /tmp/tmp.Z1dhbB3ocA ++ rm /tmp/tmp.IODZUFGj6E /tmp/tmp.Z1dhbB3ocA ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-rs0-2 + local pod=some-name-rs0-2 + set +o xtrace waiting for pod/some-name-rs0-2 to be ready.....OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.LXD1MPGoK1 +++ mktemp ++ local LAST_ERR=/tmp/tmp.weTXh9AGsg ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.LXD1MPGoK1 ++ cat /tmp/tmp.weTXh9AGsg ++ rm /tmp/tmp.LXD1MPGoK1 /tmp/tmp.weTXh9AGsg ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness. 
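The wait_cluster_consistency trace that follows polls the custom resource's .status.state until the operator reports "ready". A readable sketch of that loop, reconstructed from the traced statements (the 7-second sleep and the retry cap of 32 are the values visible in the trace; the real helper's failure handling is not shown here and is assumed):

    wait_cluster_consistency() {
        local cluster_name=$1
        local wait_time=${2:-32}    # retry cap seen in the trace
        local retry=0
        echo -n 'waiting for cluster readiness'
        # poll the psmdb custom resource until the operator reports state "ready"
        until [[ $(kubectl get psmdb "${cluster_name}" -o 'jsonpath={.status.state}') == "ready" ]]; do
            sleep 7
            echo -n .
            retry=$((retry + 1))
            if [[ ${retry} -gt ${wait_time} ]]; then
                echo "cluster ${cluster_name} did not reach ready state" >&2
                return 1
            fi
        done
        echo
    }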
+ wait_cluster_consistency some-name + local cluster_name=some-name + local wait_time=32 + retry=0 + sleep 7 + echo -n 'waiting for cluster readyness' waiting for cluster readyness++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.tBal0gV8IZ +++ mktemp ++ local LAST_ERR=/tmp/tmp.hbu596aaIW ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.tBal0gV8IZ ++ cat /tmp/tmp.hbu596aaIW ++ rm /tmp/tmp.tBal0gV8IZ /tmp/tmp.hbu596aaIW ++ return 0 + [[ ready == \r\e\a\d\y ]] + echo + log 'writing test data' + set +o xtrace [2025-05-21T23:08:42+0000] writing test data + run_mongo 'db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})' userAdmin:userAdmin123456@some-name-rs0.demand-backup-incremental-21272 + local 'command=db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})' + local uri=userAdmin:userAdmin123456@some-name-rs0.demand-backup-incremental-21272 + local driver=mongodb+srv + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.iP07SrvcHZ +++ mktemp ++ local LAST_ERR=/tmp/tmp.VjZGOUvzRh ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.iP07SrvcHZ ++ cat /tmp/tmp.VjZGOUvzRh ++ rm /tmp/tmp.iP07SrvcHZ /tmp/tmp.VjZGOUvzRh ++ return 0 + local client_container=psmdb-client-b9788d8bc-fgxc6 + local mongo_flag= + [[ userAdmin:userAdmin123456@some-name-rs0.demand-backup-incremental-21272 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-b9788d8bc-fgxc6 -- bash -c 'printf '\''db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})\n'\'' | mongo mongodb+srv://userAdmin:userAdmin123456@some-name-rs0.demand-backup-incremental-21272.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.YoCKEY9AeV ++ mktemp + local LAST_ERR=/tmp/tmp.ZxxAcuUNFk + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-b9788d8bc-fgxc6 -- bash -c 'printf '\''db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})\n'\'' | mongo mongodb+srv://userAdmin:userAdmin123456@some-name-rs0.demand-backup-incremental-21272.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.YoCKEY9AeV Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-rs0-2.some-name-rs0.demand-backup-incremental-21272.svc.cluster.local:27017,some-name-rs0-1.some-name-rs0.demand-backup-incremental-21272.svc.cluster.local:27017,some-name-rs0-0.some-name-rs0.demand-backup-incremental-21272.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("b51cf442-4d13-424a-a2e9-bf60bd578b04") } Percona Server for MongoDB server version: v7.0.18-11 WARNING: shell and server versions do not match Successfully added user: { "user" : "myApp", "roles" : [ { "db" : "myApp", "role" : "readWrite" } ] } bye + cat /tmp/tmp.ZxxAcuUNFk + rm /tmp/tmp.YoCKEY9AeV 
/tmp/tmp.ZxxAcuUNFk + return 0 + sleep 1 + run_mongo 'use myApp\n db.test.insert({ x: 100500 })' myApp:myPass@some-name-rs0.demand-backup-incremental-21272 + local 'command=use myApp\n db.test.insert({ x: 100500 })' + local uri=myApp:myPass@some-name-rs0.demand-backup-incremental-21272 + local driver=mongodb+srv + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.GWwLdsSQcv +++ mktemp ++ local LAST_ERR=/tmp/tmp.70nEaRecJY ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.GWwLdsSQcv ++ cat /tmp/tmp.70nEaRecJY ++ rm /tmp/tmp.GWwLdsSQcv /tmp/tmp.70nEaRecJY ++ return 0 + local client_container=psmdb-client-b9788d8bc-fgxc6 + local mongo_flag= + [[ myApp:myPass@some-name-rs0.demand-backup-incremental-21272 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-b9788d8bc-fgxc6 -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.demand-backup-incremental-21272.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.3H00tzeM2c ++ mktemp + local LAST_ERR=/tmp/tmp.DKKnpG5DOs + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-b9788d8bc-fgxc6 -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.demand-backup-incremental-21272.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.3H00tzeM2c Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-rs0-0.some-name-rs0.demand-backup-incremental-21272.svc.cluster.local:27017,some-name-rs0-2.some-name-rs0.demand-backup-incremental-21272.svc.cluster.local:27017,some-name-rs0-1.some-name-rs0.demand-backup-incremental-21272.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("646c41a6-1e5e-4e67-b6eb-7cbfc96c65fa") } Percona Server for MongoDB server version: v7.0.18-11 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.DKKnpG5DOs + rm /tmp/tmp.3H00tzeM2c /tmp/tmp.DKKnpG5DOs + return 0 + sleep 5 + compare_mongo_cmd find myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-incremental-21272 + local command=find + local uri=myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-incremental-21272 + local postfix= + local suffix= + local database=myApp + local collection=test + local sort= + local 'full_command=db.test.find()' + [[ ! 
-z '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-05-21T23:08:55+0000] running db.test.find() in myApp + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-incremental-21272 mongodb '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-incremental-21272 + local driver=mongodb + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ local LAST_OUT=/tmp/tmp.muqkqCduwk +++ mktemp ++ local LAST_ERR=/tmp/tmp.yTtwmoWdNE ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.muqkqCduwk ++ cat /tmp/tmp.yTtwmoWdNE ++ rm /tmp/tmp.muqkqCduwk /tmp/tmp.yTtwmoWdNE ++ return 0 + local client_container=psmdb-client-b9788d8bc-fgxc6 + local mongo_flag= + [[ myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-incremental-21272 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-b9788d8bc-fgxc6 -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-incremental-21272.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.TjrSoXkiV2 ++ mktemp + local LAST_ERR=/tmp/tmp.acQvj8LjQb + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-b9788d8bc-fgxc6 -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-incremental-21272.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.TjrSoXkiV2 + cat /tmp/tmp.acQvj8LjQb + rm /tmp/tmp.TjrSoXkiV2 /tmp/tmp.acQvj8LjQb + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1938/e2e-tests/demand-backup-incremental/compare/find.json /tmp/tmp.SERsePjeEv/find + compare_mongo_cmd find myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-incremental-21272 + local command=find + local uri=myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-incremental-21272 + local postfix= + local suffix= + local database=myApp + local collection=test + local sort= + local 'full_command=db.test.find()' + [[ ! 
-z '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-05-21T23:08:58+0000] running db.test.find() in myApp + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-incremental-21272 mongodb '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-incremental-21272 + local driver=mongodb + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ local LAST_OUT=/tmp/tmp.YuvWW4g5TN +++ mktemp ++ local LAST_ERR=/tmp/tmp.iRDDia0I5s ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.YuvWW4g5TN ++ cat /tmp/tmp.iRDDia0I5s ++ rm /tmp/tmp.YuvWW4g5TN /tmp/tmp.iRDDia0I5s ++ return 0 + local client_container=psmdb-client-b9788d8bc-fgxc6 + local mongo_flag= + [[ myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-incremental-21272 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-b9788d8bc-fgxc6 -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-incremental-21272.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.AWJp4AJO6q ++ mktemp + local LAST_ERR=/tmp/tmp.LMYvV89KQa + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-b9788d8bc-fgxc6 -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-incremental-21272.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.AWJp4AJO6q + cat /tmp/tmp.LMYvV89KQa + rm /tmp/tmp.AWJp4AJO6q /tmp/tmp.LMYvV89KQa + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1938/e2e-tests/demand-backup-incremental/compare/find.json /tmp/tmp.SERsePjeEv/find + compare_mongo_cmd find myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-incremental-21272 + local command=find + local uri=myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-incremental-21272 + local postfix= + local suffix= + local database=myApp + local collection=test + local sort= + local 'full_command=db.test.find()' + [[ ! 
-z '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-05-21T23:09:02+0000] running db.test.find() in myApp + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-incremental-21272 mongodb '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-incremental-21272 + local driver=mongodb + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' +++ mktemp ++ local LAST_OUT=/tmp/tmp.oFJuDVFwAV +++ mktemp ++ local LAST_ERR=/tmp/tmp.DIIue1BNog ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.oFJuDVFwAV ++ cat /tmp/tmp.DIIue1BNog ++ rm /tmp/tmp.oFJuDVFwAV /tmp/tmp.DIIue1BNog ++ return 0 + local client_container=psmdb-client-b9788d8bc-fgxc6 + local mongo_flag= + [[ myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-incremental-21272 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-b9788d8bc-fgxc6 -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-incremental-21272.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.vFUYl0CFwT ++ mktemp + local LAST_ERR=/tmp/tmp.SBSOfYaYjf + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-b9788d8bc-fgxc6 -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-incremental-21272.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.vFUYl0CFwT + cat /tmp/tmp.SBSOfYaYjf + rm /tmp/tmp.vFUYl0CFwT /tmp/tmp.SBSOfYaYjf + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1938/e2e-tests/demand-backup-incremental/compare/find.json /tmp/tmp.SERsePjeEv/find + log 'running backups' + set +o xtrace [2025-05-21T23:09:05+0000] running backups + '[' -z '' ']' + backup_name_aws=backup-aws-s3 + backup_name_gcp=backup-gcp-cs + backup_name_azure=backup-azure-blob + run_backup aws-s3 backup-aws-s3 + local storage=aws-s3 + local backup_name=backup-aws-s3 + local base=true + local backup_type=incremental + [[ true == \t\r\u\e ]] + backup_type=incremental-base + yq /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1938/e2e-tests/demand-backup-incremental/conf/backup.yml + yq '.spec.type="incremental-base"' + kubectl_bin apply -f - + /usr/bin/sed -e 's/name:/name: backup-aws-s3/' ++ mktemp + local LAST_OUT=/tmp/tmp.0hLbi17Ei5 ++ mktemp + local LAST_ERR=/tmp/tmp.IFGmSNP7Mz + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + /usr/bin/sed -e 's/storageName:/storageName: aws-s3/' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.0hLbi17Ei5 perconaservermongodbbackup.psmdb.percona.com/backup-aws-s3 created + cat /tmp/tmp.IFGmSNP7Mz + rm /tmp/tmp.0hLbi17Ei5 /tmp/tmp.IFGmSNP7Mz + return 
0 + run_backup gcp-cs backup-gcp-cs + local storage=gcp-cs + local backup_name=backup-gcp-cs + local base=true + local backup_type=incremental + [[ true == \t\r\u\e ]] + backup_type=incremental-base + yq '.spec.type="incremental-base"' + /usr/bin/sed -e 's/storageName:/storageName: gcp-cs/' + /usr/bin/sed -e 's/name:/name: backup-gcp-cs/' + yq /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1938/e2e-tests/demand-backup-incremental/conf/backup.yml + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.FJwv41Kl2b ++ mktemp + local LAST_ERR=/tmp/tmp.xP5czBddMR + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.FJwv41Kl2b perconaservermongodbbackup.psmdb.percona.com/backup-gcp-cs created + cat /tmp/tmp.xP5czBddMR + rm /tmp/tmp.FJwv41Kl2b /tmp/tmp.xP5czBddMR + return 0 + run_backup azure-blob backup-azure-blob + local storage=azure-blob + local backup_name=backup-azure-blob + local base=true + local backup_type=incremental + [[ true == \t\r\u\e ]] + backup_type=incremental-base + yq /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1938/e2e-tests/demand-backup-incremental/conf/backup.yml + /usr/bin/sed -e 's/storageName:/storageName: azure-blob/' + yq '.spec.type="incremental-base"' + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.IpGVOQZBx0 + /usr/bin/sed -e 's/name:/name: backup-azure-blob/' ++ mktemp + local LAST_ERR=/tmp/tmp.IBvtaVnHYZ + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.IpGVOQZBx0 perconaservermongodbbackup.psmdb.percona.com/backup-azure-blob created + cat /tmp/tmp.IBvtaVnHYZ + rm /tmp/tmp.IpGVOQZBx0 /tmp/tmp.IBvtaVnHYZ + return 0 + wait_backup backup-aws-s3 + local backup_name=backup-aws-s3 + local target_state=ready + set +o xtrace waiting for backup-aws-s3 to reach ready state....... 
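Each wait_backup call blocks until the psmdb-backup resource reports the requested state, printing a dot per poll. A minimal sketch, assuming a one-second poll interval (the real interval, timeout, and any error-state handling are not visible in this trace):

    wait_backup() {
        local backup_name=$1
        local target_state=${2:-ready}
        echo -n "waiting for ${backup_name} to reach ${target_state} state"
        # poll the backup resource; a backup stuck in "error" would loop forever in this sketch
        until [[ $(kubectl get psmdb-backup "${backup_name}" -o 'jsonpath={.status.state}') == "${target_state}" ]]; do
            echo -n .
            sleep 1
        done
        echo
    }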
+ check_backup_in_storage backup-aws-s3 s3 rs0 + local backup=backup-aws-s3 + local storage_type=s3 + local replset=rs0 + local file=filelist.pbm + local endpoint + case ${storage_type} in + endpoint=s3.amazonaws.com ++ get_backup_dest backup-aws-s3 ++ local backup_name=backup-aws-s3 ++ sed 's|azure://||' ++ kubectl_bin get psmdb-backup backup-aws-s3 -o 'jsonpath={.status.destination}' +++ mktemp ++ sed -e 's/.json$//' ++ sed 's|s3://||' ++ local LAST_OUT=/tmp/tmp.wYQ4LO5GMU +++ mktemp ++ local LAST_ERR=/tmp/tmp.QVodfvOOnZ ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb-backup backup-aws-s3 -o 'jsonpath={.status.destination}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.wYQ4LO5GMU ++ cat /tmp/tmp.QVodfvOOnZ ++ rm /tmp/tmp.wYQ4LO5GMU /tmp/tmp.QVodfvOOnZ ++ return 0 + backup_dest=operator-testing/psmdb-demand-backup-incremental/2025-05-21T23:09:09Z + local url=https://s3.amazonaws.com/operator-testing/psmdb-demand-backup-incremental/2025-05-21T23:09:09Z/rs0/filelist.pbm + log 'checking if https://s3.amazonaws.com/operator-testing/psmdb-demand-backup-incremental/2025-05-21T23:09:09Z/rs0/filelist.pbm exists' + set +o xtrace [2025-05-21T23:09:30+0000] checking if https://s3.amazonaws.com/operator-testing/psmdb-demand-backup-incremental/2025-05-21T23:09:09Z/rs0/filelist.pbm exists + curl --fail --head https://s3.amazonaws.com/operator-testing/psmdb-demand-backup-incremental/2025-05-21T23:09:09Z/rs0/filelist.pbm % Total % Received % Xferd Average Speed Time Time Time Current Dload Upload Total Spent Left Speed 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0 0 13575 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0 0 13575 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0 HTTP/1.1 200 OK x-amz-id-2: G7/X86rmcCPsH13pOuY+szi5WhM9duy7dbVjQN3vQ0oLC//SpkKKl5mLy4AE+QXgx/VakOjMAcU= x-amz-request-id: 2AMQTW5CJTRRR4K7 Date: Wed, 21 May 2025 23:09:31 GMT Last-Modified: Wed, 21 May 2025 23:09:27 GMT x-amz-expiration: expiry-date="Fri, 23 May 2025 00:00:00 GMT", rule-id="1 Days Cleanup" ETag: "066c1c083520e28ae8b496a93f7f89da" x-amz-server-side-encryption: AES256 Accept-Ranges: bytes Content-Type: binary/octet-stream Content-Length: 13575 Server: AmazonS3 + wait_backup backup-gcp-cs + local backup_name=backup-gcp-cs + local target_state=ready + set +o xtrace waiting for backup-gcp-cs to reach ready state................. 
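The check_backup_in_storage calls verify that PBM actually wrote the backup by probing for rs0/filelist.pbm at the backup's destination over plain HTTPS. A sketch assembled from the traced steps (the endpoints are the ones seen in this log; engk8soperators.blob.core.windows.net is specific to this test environment):

    check_backup_in_storage() {
        local backup=$1 storage_type=$2 replset=$3
        local file=filelist.pbm
        local endpoint
        case ${storage_type} in
            s3)    endpoint=s3.amazonaws.com ;;
            gcs)   endpoint=storage.googleapis.com ;;
            azure) endpoint=engk8soperators.blob.core.windows.net ;;
        esac
        # .status.destination looks like s3://bucket/prefix/2025-05-21T23:09:09Z; strip the scheme
        local dest
        dest=$(kubectl get psmdb-backup "${backup}" -o 'jsonpath={.status.destination}' \
            | sed -e 's|s3://||' -e 's|azure://||' -e 's/\.json$//')
        local url="https://${endpoint}/${dest}/${replset}/${file}"
        echo "checking if ${url} exists"
        curl --fail --head "${url}"    # --fail turns a 404 into a non-zero exit
    }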
+ check_backup_in_storage backup-gcp-cs gcs rs0 + local backup=backup-gcp-cs + local storage_type=gcs + local replset=rs0 + local file=filelist.pbm + local endpoint + case ${storage_type} in + endpoint=storage.googleapis.com ++ get_backup_dest backup-gcp-cs ++ local backup_name=backup-gcp-cs ++ sed 's|s3://||' ++ sed 's|azure://||' ++ kubectl_bin get psmdb-backup backup-gcp-cs -o 'jsonpath={.status.destination}' ++ sed -e 's/.json$//' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ZS7ERRHONf +++ mktemp ++ local LAST_ERR=/tmp/tmp.bLWRcfmvFr ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb-backup backup-gcp-cs -o 'jsonpath={.status.destination}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.ZS7ERRHONf ++ cat /tmp/tmp.bLWRcfmvFr ++ rm /tmp/tmp.ZS7ERRHONf /tmp/tmp.bLWRcfmvFr ++ return 0 + backup_dest=operator-testing/psmdb-demand-backup-incremental/2025-05-21T23:09:47Z + local url=https://storage.googleapis.com/operator-testing/psmdb-demand-backup-incremental/2025-05-21T23:09:47Z/rs0/filelist.pbm + log 'checking if https://storage.googleapis.com/operator-testing/psmdb-demand-backup-incremental/2025-05-21T23:09:47Z/rs0/filelist.pbm exists' + set +o xtrace [2025-05-21T23:10:10+0000] checking if https://storage.googleapis.com/operator-testing/psmdb-demand-backup-incremental/2025-05-21T23:09:47Z/rs0/filelist.pbm exists + curl --fail --head https://storage.googleapis.com/operator-testing/psmdb-demand-backup-incremental/2025-05-21T23:09:47Z/rs0/filelist.pbm HTTP/2 200 content-type: application/octet-stream x-guploader-uploadid: AAO2VwrbKd7RIPmgcOQkL01uOgQpNsgj4SCBgSou2ZeFPXyuha4F_6BsKu9ku2dibn-LuTm5f1g0JIc expires: Thu, 22 May 2025 00:10:10 GMT date: Wed, 21 May 2025 23:10:10 GMT cache-control: public, max-age=3600 last-modified: Wed, 21 May 2025 23:10:02 GMT etag: "4b680fa11628d9d230ade939f7ff5c5d" x-goog-generation: 1747869002598671 x-goog-metageneration: 1 x-goog-stored-content-encoding: identity x-goog-stored-content-length: 14375 x-goog-hash: crc32c=9v/QBA== x-goog-hash: md5=S2gPoRYo2dIwrek59/9cXQ== x-goog-expiration: Thu, 22 May 2025 23:10:02 GMT x-goog-storage-class: STANDARD accept-ranges: bytes content-length: 14375 server: UploadServer alt-svc: h3=":443"; ma=2592000,h3-29=":443"; ma=2592000 + wait_backup backup-azure-blob + local backup_name=backup-azure-blob + local target_state=ready + set +o xtrace waiting for backup-azure-blob to reach ready state. 
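The destination normalization inside get_backup_dest is what lets one URL builder serve all three backends: the scheme prefix (s3:// or azure://) and any trailing .json are stripped from .status.destination, leaving a bare bucket/path. A sketch assuming the same jsonpath (note the trace's 's/.json$//' leaves the dot unescaped; it is escaped here):

  # normalize a backup destination to bucket/path form (sketch)
  dest=$(kubectl get psmdb-backup "${backup_name}" -o 'jsonpath={.status.destination}' \
      | sed -e 's|s3://||' -e 's|azure://||' -e 's/\.json$//')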
+ check_backup_in_storage backup-azure-blob azure rs0 + local backup=backup-azure-blob + local storage_type=azure + local replset=rs0 + local file=filelist.pbm + local endpoint + case ${storage_type} in + endpoint=engk8soperators.blob.core.windows.net ++ get_backup_dest backup-azure-blob ++ local backup_name=backup-azure-blob ++ kubectl_bin get psmdb-backup backup-azure-blob -o 'jsonpath={.status.destination}' ++ sed -e 's/.json$//' ++ sed 's|azure://||' +++ mktemp ++ sed 's|s3://||' ++ local LAST_OUT=/tmp/tmp.XawBTBeDI5 +++ mktemp ++ local LAST_ERR=/tmp/tmp.EtUKkrEVqV ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb-backup backup-azure-blob -o 'jsonpath={.status.destination}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.XawBTBeDI5 ++ cat /tmp/tmp.EtUKkrEVqV ++ rm /tmp/tmp.XawBTBeDI5 /tmp/tmp.EtUKkrEVqV ++ return 0 + backup_dest=operator-testing/psmdb-demand-backup-incremental/2025-05-21T23:09:30Z + local url=https://engk8soperators.blob.core.windows.net/operator-testing/psmdb-demand-backup-incremental/2025-05-21T23:09:30Z/rs0/filelist.pbm + log 'checking if https://engk8soperators.blob.core.windows.net/operator-testing/psmdb-demand-backup-incremental/2025-05-21T23:09:30Z/rs0/filelist.pbm exists' + set +o xtrace [2025-05-21T23:10:13+0000] checking if https://engk8soperators.blob.core.windows.net/operator-testing/psmdb-demand-backup-incremental/2025-05-21T23:09:30Z/rs0/filelist.pbm exists + curl --fail --head https://engk8soperators.blob.core.windows.net/operator-testing/psmdb-demand-backup-incremental/2025-05-21T23:09:30Z/rs0/filelist.pbm HTTP/1.1 200 OK Content-Length: 13575 Content-Type: application/octet-stream Content-MD5: aXK9lw4sSYdJtUTIXcm6ww== Last-Modified: Wed, 21 May 2025 23:09:42 GMT ETag: 0x8DD98BC9212DDCC Server: Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0 x-ms-request-id: bfbbd10e-a01e-003f-7da5-ca17cc000000 x-ms-version: 2009-09-19 x-ms-lease-status: unlocked x-ms-blob-type: BlockBlob Date: Wed, 21 May 2025 23:10:13 GMT + backup_name_minio=backup-minio + run_backup minio backup-minio + local storage=minio + local backup_name=backup-minio + local base=true + local backup_type=incremental + [[ true == \t\r\u\e ]] + backup_type=incremental-base + yq /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1938/e2e-tests/demand-backup-incremental/conf/backup.yml + /usr/bin/sed -e 's/name:/name: backup-minio/' + yq '.spec.type="incremental-base"' + kubectl_bin apply -f - ++ mktemp + /usr/bin/sed -e 's/storageName:/storageName: minio/' + local LAST_OUT=/tmp/tmp.2YRrClJ8W1 ++ mktemp + local LAST_ERR=/tmp/tmp.HzCh7TWnPV + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.2YRrClJ8W1 perconaservermongodbbackup.psmdb.percona.com/backup-minio created + cat /tmp/tmp.HzCh7TWnPV + rm /tmp/tmp.2YRrClJ8W1 /tmp/tmp.HzCh7TWnPV + return 0 + wait_backup backup-minio + local backup_name=backup-minio + local target_state=ready + set +o xtrace waiting for backup-minio to reach ready state..... 
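Nearly every command in this log runs through the kubectl_bin wrapper: stdout and stderr are captured into mktemp files, the call is retried up to three times (seq 0 2), and the captured output is replayed and cleaned up afterwards. A reconstruction from the xtrace output follows; the sleep between attempts is an assumption, since the trace only shows successful first attempts:

  # kubectl_bin retry wrapper (sketch reconstructed from the trace; the real helper may differ)
  kubectl_bin() {
      local LAST_OUT LAST_ERR exit_status=0 timeout=4
      LAST_OUT=$(mktemp)
      LAST_ERR=$(mktemp)
      for i in $(seq 0 2); do                        # up to 3 attempts
          set +e
          kubectl "$@" >"${LAST_OUT}" 2>"${LAST_ERR}"
          exit_status=$?
          set -e
          [ "${exit_status}" -eq 0 ] && break        # success: stop retrying
          sleep "${timeout}"                         # assumed pause before the next attempt
      done
      cat "${LAST_OUT}"                              # replay captured stdout
      cat "${LAST_ERR}" >&2                          # replay captured stderr
      rm "${LAST_OUT}" "${LAST_ERR}"
      return "${exit_status}"
  }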
+ run_mongo 'use myApp\n db.test.insert({ x: 100501 })' myApp:myPass@some-name-rs0.demand-backup-incremental-21272 + local 'command=use myApp\n db.test.insert({ x: 100501 })' + local uri=myApp:myPass@some-name-rs0.demand-backup-incremental-21272 + local driver=mongodb+srv + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.1pv0sEui5X +++ mktemp ++ local LAST_ERR=/tmp/tmp.oHNjJ3snaU ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.1pv0sEui5X ++ cat /tmp/tmp.oHNjJ3snaU ++ rm /tmp/tmp.1pv0sEui5X /tmp/tmp.oHNjJ3snaU ++ return 0 + local client_container=psmdb-client-b9788d8bc-fgxc6 + local mongo_flag= + [[ myApp:myPass@some-name-rs0.demand-backup-incremental-21272 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-b9788d8bc-fgxc6 -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100501 })\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.demand-backup-incremental-21272.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.nff9VJEXCW ++ mktemp + local LAST_ERR=/tmp/tmp.5w5QRp8QFI + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-b9788d8bc-fgxc6 -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100501 })\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.demand-backup-incremental-21272.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.nff9VJEXCW Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-rs0-1.some-name-rs0.demand-backup-incremental-21272.svc.cluster.local:27017,some-name-rs0-0.some-name-rs0.demand-backup-incremental-21272.svc.cluster.local:27017,some-name-rs0-2.some-name-rs0.demand-backup-incremental-21272.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("a45a8f22-6ac9-4a6b-ba3a-d7bf93f978bc") } Percona Server for MongoDB server version: v7.0.18-11 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.5w5QRp8QFI + rm /tmp/tmp.nff9VJEXCW /tmp/tmp.5w5QRp8QFI + return 0 + sleep 5 + compare_mongo_cmd find myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-incremental-21272 -not-base + local command=find + local uri=myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-incremental-21272 + local postfix=-not-base + local suffix= + local database=myApp + local collection=test + local sort= + local 'full_command=db.test.find()' + [[ ! 
-z '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-05-21T23:10:35+0000] running db.test.find() in myApp + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-incremental-21272 mongodb '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-incremental-21272 + local driver=mongodb + local suffix=.svc.cluster.local + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.hO0UfVjpQW +++ mktemp ++ local LAST_ERR=/tmp/tmp.SLbZ24ChRY ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.hO0UfVjpQW ++ cat /tmp/tmp.SLbZ24ChRY ++ rm /tmp/tmp.hO0UfVjpQW /tmp/tmp.SLbZ24ChRY ++ return 0 + local client_container=psmdb-client-b9788d8bc-fgxc6 + local mongo_flag= + [[ myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-incremental-21272 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-b9788d8bc-fgxc6 -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-incremental-21272.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.KUxasGzapi ++ mktemp + local LAST_ERR=/tmp/tmp.Ttxwdcv8nC + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-b9788d8bc-fgxc6 -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-incremental-21272.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.KUxasGzapi + cat /tmp/tmp.Ttxwdcv8nC + rm /tmp/tmp.KUxasGzapi /tmp/tmp.Ttxwdcv8nC + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1938/e2e-tests/demand-backup-incremental/compare/find-not-base.json /tmp/tmp.SERsePjeEv/find-not-base + compare_mongo_cmd find myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-incremental-21272 -not-base + local command=find + local uri=myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-incremental-21272 + local postfix=-not-base + local suffix= + local database=myApp + local collection=test + local sort= + local 'full_command=db.test.find()' + [[ ! 
-z '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-05-21T23:10:39+0000] running db.test.find() in myApp + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-incremental-21272 mongodb '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-incremental-21272 + local driver=mongodb + local suffix=.svc.cluster.local + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.1K22zIPNHr +++ mktemp ++ local LAST_ERR=/tmp/tmp.fIb56yB31Z ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.1K22zIPNHr ++ cat /tmp/tmp.fIb56yB31Z ++ rm /tmp/tmp.1K22zIPNHr /tmp/tmp.fIb56yB31Z ++ return 0 + local client_container=psmdb-client-b9788d8bc-fgxc6 + local mongo_flag= + [[ myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-incremental-21272 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-b9788d8bc-fgxc6 -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-incremental-21272.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.8ejKBB4rTM ++ mktemp + local LAST_ERR=/tmp/tmp.DSrKdxim6h + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-b9788d8bc-fgxc6 -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-incremental-21272.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.8ejKBB4rTM + cat /tmp/tmp.DSrKdxim6h + rm /tmp/tmp.8ejKBB4rTM /tmp/tmp.DSrKdxim6h + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1938/e2e-tests/demand-backup-incremental/compare/find-not-base.json /tmp/tmp.SERsePjeEv/find-not-base + compare_mongo_cmd find myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-incremental-21272 -not-base + local command=find + local uri=myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-incremental-21272 + local postfix=-not-base + local suffix= + local database=myApp + local collection=test + local sort= + local 'full_command=db.test.find()' + [[ ! 
-z '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-05-21T23:10:42+0000] running db.test.find() in myApp + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-incremental-21272 mongodb '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-incremental-21272 + local driver=mongodb + local suffix=.svc.cluster.local + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' ++ local LAST_OUT=/tmp/tmp.W2gp0ctJIq +++ mktemp ++ local LAST_ERR=/tmp/tmp.DtqqUo2OHR ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.W2gp0ctJIq ++ cat /tmp/tmp.DtqqUo2OHR ++ rm /tmp/tmp.W2gp0ctJIq /tmp/tmp.DtqqUo2OHR ++ return 0 + local client_container=psmdb-client-b9788d8bc-fgxc6 + local mongo_flag= + [[ myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-incremental-21272 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-b9788d8bc-fgxc6 -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-incremental-21272.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.K7uXZMVQh5 ++ mktemp + local LAST_ERR=/tmp/tmp.kUAmDySInM + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-b9788d8bc-fgxc6 -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-incremental-21272.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.K7uXZMVQh5 + cat /tmp/tmp.kUAmDySInM + rm /tmp/tmp.K7uXZMVQh5 /tmp/tmp.kUAmDySInM + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1938/e2e-tests/demand-backup-incremental/compare/find-not-base.json /tmp/tmp.SERsePjeEv/find-not-base + backup_name_minio_not_base=backup-minio-not-base + run_backup minio backup-minio-not-base false + local storage=minio + local backup_name=backup-minio-not-base + local base=false + local backup_type=incremental + [[ false == \t\r\u\e ]] + yq /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1938/e2e-tests/demand-backup-incremental/conf/backup.yml + yq '.spec.type="incremental"' + /usr/bin/sed -e 's/name:/name: backup-minio-not-base/' + /usr/bin/sed -e 's/storageName:/storageName: minio/' + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.euAPR1F25W ++ mktemp + local LAST_ERR=/tmp/tmp.kXTPIiZlIX + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.euAPR1F25W perconaservermongodbbackup.psmdb.percona.com/backup-minio-not-base created + cat /tmp/tmp.kXTPIiZlIX + rm /tmp/tmp.euAPR1F25W /tmp/tmp.kXTPIiZlIX + return 0 + wait_backup backup-minio-not-base + local backup_name=backup-minio-not-base + local target_state=ready + set +o xtrace waiting 
for backup-minio-not-base to reach ready state..... + '[' -z '' ']' + run_restore backup-aws-s3 + local backup_name=backup-aws-s3 + log 'drop collection' + set +o xtrace [2025-05-21T23:11:00+0000] drop collection + run_mongo 'use myApp\n db.test.drop()' myApp:myPass@some-name-rs0.demand-backup-incremental-21272 + local 'command=use myApp\n db.test.drop()' + local uri=myApp:myPass@some-name-rs0.demand-backup-incremental-21272 + local driver=mongodb+srv + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.QIypBXpCPW +++ mktemp ++ local LAST_ERR=/tmp/tmp.Yzm1Jd50cj ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.QIypBXpCPW ++ cat /tmp/tmp.Yzm1Jd50cj ++ rm /tmp/tmp.QIypBXpCPW /tmp/tmp.Yzm1Jd50cj ++ return 0 + local client_container=psmdb-client-b9788d8bc-fgxc6 + local mongo_flag= + [[ myApp:myPass@some-name-rs0.demand-backup-incremental-21272 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-b9788d8bc-fgxc6 -- bash -c 'printf '\''use myApp\n db.test.drop()\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.demand-backup-incremental-21272.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.yZ7Pboqq8N ++ mktemp + local LAST_ERR=/tmp/tmp.O55CG2aUdI + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-b9788d8bc-fgxc6 -- bash -c 'printf '\''use myApp\n db.test.drop()\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.demand-backup-incremental-21272.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.yZ7Pboqq8N Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-rs0-2.some-name-rs0.demand-backup-incremental-21272.svc.cluster.local:27017,some-name-rs0-1.some-name-rs0.demand-backup-incremental-21272.svc.cluster.local:27017,some-name-rs0-0.some-name-rs0.demand-backup-incremental-21272.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("cf20aa90-f025-407e-8f6d-3c488fe88e25") } Percona Server for MongoDB server version: v7.0.18-11 WARNING: shell and server versions do not match switched to db myApp true bye + cat /tmp/tmp.O55CG2aUdI + rm /tmp/tmp.yZ7Pboqq8N /tmp/tmp.O55CG2aUdI + return 0 + log 'check backup and restore -- backup-aws-s3' + set +o xtrace [2025-05-21T23:11:03+0000] check backup and restore -- backup-aws-s3 + /usr/bin/sed -e 's/name:/name: restore-backup-aws-s3/' + /usr/bin/sed -e 's/backupName:/backupName: backup-aws-s3/' + yq /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1938/e2e-tests/demand-backup-incremental/conf/restore.yml + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.D0ya5Tjm1K ++ mktemp + local LAST_ERR=/tmp/tmp.PYZnv1WEuw + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.D0ya5Tjm1K perconaservermongodbrestore.psmdb.percona.com/restore-backup-aws-s3 created + cat /tmp/tmp.PYZnv1WEuw + rm /tmp/tmp.D0ya5Tjm1K /tmp/tmp.PYZnv1WEuw + return 0 + run_recovery_check backup-aws-s3 + local backup_name=backup-aws-s3 + local 
compare_suffix=_restore + local base=true + wait_restore backup-aws-s3 some-name requested 0 3000 + local backup_name=backup-aws-s3 + local cluster_name=some-name + local target_state=requested + local wait_cluster_consistency=0 + local wait_time=3000 + local ok_if_ready=0 + set +o xtrace Waiting for the psmdb-restore/restore-backup-aws-s3 object to be createdOK Waiting psmdb-restore/restore-backup-aws-s3 to reach state "requested" .OK + [[ 0 -eq 1 ]] + echo + compare_kubectl statefulset/some-name-rs0 _restore + local resource=statefulset/some-name-rs0 + local postfix=_restore + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1938/e2e-tests/demand-backup-incremental/compare/statefulset_some-name-rs0_restore.yml + local new_result=/tmp/tmp.SERsePjeEv/statefulset_some-name-rs0.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1938/e2e-tests/demand-backup-incremental/compare/statefulset_some-name-rs0_restore-oc.yml ']' + kubectl_bin get -o yaml statefulset/some-name-rs0 + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("demand-backup-incremental-21272", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. 
== "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.pz0e0ds1uD ++ mktemp + local LAST_ERR=/tmp/tmp.9sRjRMr5fz + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get -o yaml statefulset/some-name-rs0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.pz0e0ds1uD + cat /tmp/tmp.9sRjRMr5fz + rm /tmp/tmp.pz0e0ds1uD /tmp/tmp.9sRjRMr5fz + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.SERsePjeEv/statefulset_some-name-rs0.yml + version_gt 1.22 ++ echo '1.30 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.SERsePjeEv/statefulset_some-name-rs0.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.SERsePjeEv/statefulset_some-name-rs0.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1938/e2e-tests/demand-backup-incremental/compare/statefulset_some-name-rs0_restore.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1938/e2e-tests/demand-backup-incremental/compare/statefulset_some-name-rs0_restore.yml /tmp/tmp.SERsePjeEv/statefulset_some-name-rs0.yml + wait_restore backup-aws-s3 some-name ready 0 1800 + local backup_name=backup-aws-s3 + local cluster_name=some-name + local target_state=ready + local wait_cluster_consistency=0 + local wait_time=1800 + local ok_if_ready=0 + set +o xtrace Waiting for the psmdb-restore/restore-backup-aws-s3 object to be createdOK Waiting psmdb-restore/restore-backup-aws-s3 to reach state "ready" .OK + [[ 0 -eq 1 ]] ++ kubectl_bin get psmdb some-name -o yaml ++ yq '.metadata.annotations."percona.com/resync-pbm"' +++ mktemp ++ local LAST_OUT=/tmp/tmp.8ckfcER9Bd +++ mktemp ++ local LAST_ERR=/tmp/tmp.hFsT5c0d5y ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o yaml ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.8ckfcER9Bd ++ cat /tmp/tmp.hFsT5c0d5y ++ rm /tmp/tmp.8ckfcER9Bd /tmp/tmp.hFsT5c0d5y ++ return 0 + '[' true == null ']' + echo + wait_cluster_consistency some-name + local cluster_name=some-name + local wait_time=32 + retry=0 + sleep 7 + echo -n 'waiting for cluster readyness' waiting for cluster readyness++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.jyxYOP0tCy +++ mktemp ++ local LAST_ERR=/tmp/tmp.P1X4T8kJEK ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.jyxYOP0tCy ++ cat /tmp/tmp.P1X4T8kJEK ++ rm /tmp/tmp.jyxYOP0tCy /tmp/tmp.P1X4T8kJEK ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 1 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.VTG1v1K3ez +++ mktemp ++ local LAST_ERR=/tmp/tmp.ZZ69fQNQak ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.VTG1v1K3ez ++ cat /tmp/tmp.ZZ69fQNQak ++ rm /tmp/tmp.VTG1v1K3ez /tmp/tmp.ZZ69fQNQak ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 2 -ge 32 ']' + echo -n . 
.+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.rs5PUE5FZZ +++ mktemp ++ local LAST_ERR=/tmp/tmp.TeeO2FjNF0 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.rs5PUE5FZZ ++ cat /tmp/tmp.TeeO2FjNF0 ++ rm /tmp/tmp.rs5PUE5FZZ /tmp/tmp.TeeO2FjNF0 ++ return 0 + [[ error == \r\e\a\d\y ]] + let retry+=1 + '[' 3 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.b4B0i5dTw4 +++ mktemp ++ local LAST_ERR=/tmp/tmp.iCGNZITPAO ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.b4B0i5dTw4 ++ cat /tmp/tmp.iCGNZITPAO ++ rm /tmp/tmp.b4B0i5dTw4 /tmp/tmp.iCGNZITPAO ++ return 0 + [[ error == \r\e\a\d\y ]] + let retry+=1 + '[' 4 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.8WTNB4VycB +++ mktemp ++ local LAST_ERR=/tmp/tmp.GSntoPWnjS ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.8WTNB4VycB ++ cat /tmp/tmp.GSntoPWnjS ++ rm /tmp/tmp.8WTNB4VycB /tmp/tmp.GSntoPWnjS ++ return 0 + [[ error == \r\e\a\d\y ]] + let retry+=1 + '[' 5 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.0IEbzlxQ06 +++ mktemp ++ local LAST_ERR=/tmp/tmp.PQPffkPsLl ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.0IEbzlxQ06 ++ cat /tmp/tmp.PQPffkPsLl ++ rm /tmp/tmp.0IEbzlxQ06 /tmp/tmp.PQPffkPsLl ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 6 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.kK5oa2wAQB +++ mktemp ++ local LAST_ERR=/tmp/tmp.XBCFct6DED ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.kK5oa2wAQB ++ cat /tmp/tmp.XBCFct6DED ++ rm /tmp/tmp.kK5oa2wAQB /tmp/tmp.XBCFct6DED ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 7 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.vI14dkmHQn +++ mktemp ++ local LAST_ERR=/tmp/tmp.QDgDz1Kzlu ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.vI14dkmHQn ++ cat /tmp/tmp.QDgDz1Kzlu ++ rm /tmp/tmp.vI14dkmHQn /tmp/tmp.QDgDz1Kzlu ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 8 -ge 32 ']' + echo -n . 
.+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.E4hXqQapP5 +++ mktemp ++ local LAST_ERR=/tmp/tmp.OWzDbn1XSK ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.E4hXqQapP5 ++ cat /tmp/tmp.OWzDbn1XSK ++ rm /tmp/tmp.E4hXqQapP5 /tmp/tmp.OWzDbn1XSK ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 9 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.dbwoRmx80W +++ mktemp ++ local LAST_ERR=/tmp/tmp.xWnZvnXgI8 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.dbwoRmx80W ++ cat /tmp/tmp.xWnZvnXgI8 ++ rm /tmp/tmp.dbwoRmx80W /tmp/tmp.xWnZvnXgI8 ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 10 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.LdO8sADf4r +++ mktemp ++ local LAST_ERR=/tmp/tmp.RPjjz8rqt6 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.LdO8sADf4r ++ cat /tmp/tmp.RPjjz8rqt6 ++ rm /tmp/tmp.LdO8sADf4r /tmp/tmp.RPjjz8rqt6 ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 11 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.B2i8a5h1Xh +++ mktemp ++ local LAST_ERR=/tmp/tmp.cIUIoz9qLK ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.B2i8a5h1Xh ++ cat /tmp/tmp.cIUIoz9qLK ++ rm /tmp/tmp.B2i8a5h1Xh /tmp/tmp.cIUIoz9qLK ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 12 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.9tV6G9Tb32 +++ mktemp ++ local LAST_ERR=/tmp/tmp.bp16s289bH ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.9tV6G9Tb32 ++ cat /tmp/tmp.bp16s289bH ++ rm /tmp/tmp.9tV6G9Tb32 /tmp/tmp.bp16s289bH ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 13 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.REIXwT0FRX +++ mktemp ++ local LAST_ERR=/tmp/tmp.6zDo5C0mKS ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.REIXwT0FRX ++ cat /tmp/tmp.6zDo5C0mKS ++ rm /tmp/tmp.REIXwT0FRX /tmp/tmp.6zDo5C0mKS ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 14 -ge 32 ']' + echo -n . 
.+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Aq3jgyyvRO +++ mktemp ++ local LAST_ERR=/tmp/tmp.el9rRGzrB0 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Aq3jgyyvRO ++ cat /tmp/tmp.el9rRGzrB0 ++ rm /tmp/tmp.Aq3jgyyvRO /tmp/tmp.el9rRGzrB0 ++ return 0 + [[ ready == \r\e\a\d\y ]] + echo + wait_for_pbm_operations some-name + local cluster=some-name + set +o xtrace waiting for PBM operation to finish.. + [[ true == true ]] + compare_mongo_cmd find myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-incremental-21272 + local command=find + local uri=myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-incremental-21272 + local postfix= + local suffix= + local database=myApp + local collection=test + local sort= + local 'full_command=db.test.find()' + [[ ! -z '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-05-21T23:17:46+0000] running db.test.find() in myApp + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-incremental-21272 mongodb '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-incremental-21272 + local driver=mongodb + local suffix=.svc.cluster.local + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ local LAST_OUT=/tmp/tmp.LSkuWuhhuk +++ mktemp ++ local LAST_ERR=/tmp/tmp.8CzCYAQLUD ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.LSkuWuhhuk ++ cat /tmp/tmp.8CzCYAQLUD ++ rm /tmp/tmp.LSkuWuhhuk /tmp/tmp.8CzCYAQLUD ++ return 0 + local client_container=psmdb-client-b9788d8bc-fgxc6 + local mongo_flag= + [[ myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-incremental-21272 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-b9788d8bc-fgxc6 -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-incremental-21272.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.d2yIVh94EO ++ mktemp + local LAST_ERR=/tmp/tmp.eyR48csPS5 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-b9788d8bc-fgxc6 -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-incremental-21272.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.d2yIVh94EO + cat /tmp/tmp.eyR48csPS5 + rm /tmp/tmp.d2yIVh94EO /tmp/tmp.eyR48csPS5 + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1938/e2e-tests/demand-backup-incremental/compare/find.json /tmp/tmp.SERsePjeEv/find + compare_mongo_cmd find 
myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-incremental-21272 + local command=find + local uri=myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-incremental-21272 + local postfix= + local suffix= + local database=myApp + local collection=test + local sort= + local 'full_command=db.test.find()' + [[ ! -z '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-05-21T23:17:49+0000] running db.test.find() in myApp + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-incremental-21272 mongodb '' + local 'command=use myApp\n db.test.find()' + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' + local uri=myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-incremental-21272 + local driver=mongodb + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Do0pUCE9nG +++ mktemp ++ local LAST_ERR=/tmp/tmp.jQ8RlHKbCI ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Do0pUCE9nG ++ cat /tmp/tmp.jQ8RlHKbCI ++ rm /tmp/tmp.Do0pUCE9nG /tmp/tmp.jQ8RlHKbCI ++ return 0 + local client_container=psmdb-client-b9788d8bc-fgxc6 + local mongo_flag= + [[ myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-incremental-21272 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-b9788d8bc-fgxc6 -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-incremental-21272.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.ospJ2QtyEl ++ mktemp + local LAST_ERR=/tmp/tmp.t1gWgnAJTC + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-b9788d8bc-fgxc6 -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-incremental-21272.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.ospJ2QtyEl + cat /tmp/tmp.t1gWgnAJTC + rm /tmp/tmp.ospJ2QtyEl /tmp/tmp.t1gWgnAJTC + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1938/e2e-tests/demand-backup-incremental/compare/find.json /tmp/tmp.SERsePjeEv/find + compare_mongo_cmd find myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-incremental-21272 + local command=find + local uri=myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-incremental-21272 + local postfix= + local suffix= + local database=myApp + local collection=test + local sort= + local 'full_command=db.test.find()' + [[ ! 
-z '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-05-21T23:17:51+0000] running db.test.find() in myApp + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-incremental-21272 mongodb '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-incremental-21272 + local driver=mongodb + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + local suffix=.svc.cluster.local + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.g0DxngKjyW +++ mktemp ++ local LAST_ERR=/tmp/tmp.3xupFQOEls ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.g0DxngKjyW ++ cat /tmp/tmp.3xupFQOEls ++ rm /tmp/tmp.g0DxngKjyW /tmp/tmp.3xupFQOEls ++ return 0 + local client_container=psmdb-client-b9788d8bc-fgxc6 + local mongo_flag= + [[ myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-incremental-21272 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-b9788d8bc-fgxc6 -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-incremental-21272.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.ITvHOjdQJz ++ mktemp + local LAST_ERR=/tmp/tmp.WvSwTxzDmd + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-b9788d8bc-fgxc6 -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-incremental-21272.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.ITvHOjdQJz + cat /tmp/tmp.WvSwTxzDmd + rm /tmp/tmp.ITvHOjdQJz /tmp/tmp.WvSwTxzDmd + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1938/e2e-tests/demand-backup-incremental/compare/find.json /tmp/tmp.SERsePjeEv/find + run_restore backup-gcp-cs + local backup_name=backup-gcp-cs + log 'drop collection' + set +o xtrace [2025-05-21T23:17:53+0000] drop collection + run_mongo 'use myApp\n db.test.drop()' myApp:myPass@some-name-rs0.demand-backup-incremental-21272 + local 'command=use myApp\n db.test.drop()' + local uri=myApp:myPass@some-name-rs0.demand-backup-incremental-21272 + local driver=mongodb+srv + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.LJ7yQ1oX2l +++ mktemp ++ local LAST_ERR=/tmp/tmp.I0d9Hd0waY ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.LJ7yQ1oX2l ++ cat /tmp/tmp.I0d9Hd0waY ++ rm /tmp/tmp.LJ7yQ1oX2l /tmp/tmp.I0d9Hd0waY ++ return 0 + local client_container=psmdb-client-b9788d8bc-fgxc6 + local mongo_flag= + [[ myApp:myPass@some-name-rs0.demand-backup-incremental-21272 == 
*cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-b9788d8bc-fgxc6 -- bash -c 'printf '\''use myApp\n db.test.drop()\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.demand-backup-incremental-21272.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.5xgHDoOriB ++ mktemp + local LAST_ERR=/tmp/tmp.jL5jWX2Sju + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-b9788d8bc-fgxc6 -- bash -c 'printf '\''use myApp\n db.test.drop()\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.demand-backup-incremental-21272.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.5xgHDoOriB Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-rs0-2.some-name-rs0.demand-backup-incremental-21272.svc.cluster.local:27017,some-name-rs0-1.some-name-rs0.demand-backup-incremental-21272.svc.cluster.local:27017,some-name-rs0-0.some-name-rs0.demand-backup-incremental-21272.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("a5a2df69-1efc-4f62-8537-5bc9675a578d") } Percona Server for MongoDB server version: v7.0.18-11 WARNING: shell and server versions do not match switched to db myApp true bye + cat /tmp/tmp.jL5jWX2Sju + rm /tmp/tmp.5xgHDoOriB /tmp/tmp.jL5jWX2Sju + return 0 + log 'check backup and restore -- backup-gcp-cs' + set +o xtrace [2025-05-21T23:17:55+0000] check backup and restore -- backup-gcp-cs + /usr/bin/sed -e 's/name:/name: restore-backup-gcp-cs/' + /usr/bin/sed -e 's/backupName:/backupName: backup-gcp-cs/' + kubectl_bin apply -f - + yq /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1938/e2e-tests/demand-backup-incremental/conf/restore.yml ++ mktemp + local LAST_OUT=/tmp/tmp.gsRcVE9xVX ++ mktemp + local LAST_ERR=/tmp/tmp.ZK458Nsdd7 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.gsRcVE9xVX perconaservermongodbrestore.psmdb.percona.com/restore-backup-gcp-cs created + cat /tmp/tmp.ZK458Nsdd7 + rm /tmp/tmp.gsRcVE9xVX /tmp/tmp.ZK458Nsdd7 + return 0 + run_recovery_check backup-gcp-cs + local backup_name=backup-gcp-cs + local compare_suffix=_restore + local base=true + wait_restore backup-gcp-cs some-name requested 0 3000 + local backup_name=backup-gcp-cs + local cluster_name=some-name + local target_state=requested + local wait_cluster_consistency=0 + local wait_time=3000 + local ok_if_ready=0 + set +o xtrace Waiting for the psmdb-restore/restore-backup-gcp-cs object to be createdOK Waiting psmdb-restore/restore-backup-gcp-cs to reach state "requested" ..OK + [[ 0 -eq 1 ]] + echo + compare_kubectl statefulset/some-name-rs0 _restore + local resource=statefulset/some-name-rs0 + local postfix=_restore + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1938/e2e-tests/demand-backup-incremental/compare/statefulset_some-name-rs0_restore.yml + local new_result=/tmp/tmp.SERsePjeEv/statefulset_some-name-rs0.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1938/e2e-tests/demand-backup-incremental/compare/statefulset_some-name-rs0_restore-oc.yml ']' + kubectl_bin get -o yaml statefulset/some-name-rs0 + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. 
| select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("demand-backup-incremental-21272", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. 
== "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.DWjy5VmUfF ++ mktemp + local LAST_ERR=/tmp/tmp.z23Hh5Oesd + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get -o yaml statefulset/some-name-rs0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.DWjy5VmUfF + cat /tmp/tmp.z23Hh5Oesd + rm /tmp/tmp.DWjy5VmUfF /tmp/tmp.z23Hh5Oesd + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.SERsePjeEv/statefulset_some-name-rs0.yml + version_gt 1.22 ++ echo '1.30 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.SERsePjeEv/statefulset_some-name-rs0.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.SERsePjeEv/statefulset_some-name-rs0.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1938/e2e-tests/demand-backup-incremental/compare/statefulset_some-name-rs0_restore.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1938/e2e-tests/demand-backup-incremental/compare/statefulset_some-name-rs0_restore.yml /tmp/tmp.SERsePjeEv/statefulset_some-name-rs0.yml + wait_restore backup-gcp-cs some-name ready 0 1800 + local backup_name=backup-gcp-cs + local cluster_name=some-name + local target_state=ready + local wait_cluster_consistency=0 + local wait_time=1800 + local ok_if_ready=0 + set +o xtrace Waiting for the psmdb-restore/restore-backup-gcp-cs object to be createdOK Waiting psmdb-restore/restore-backup-gcp-cs to reach state "ready" .OK + [[ 0 -eq 1 ]] ++ kubectl_bin get psmdb some-name -o yaml ++ yq '.metadata.annotations."percona.com/resync-pbm"' +++ mktemp ++ local LAST_OUT=/tmp/tmp.iA3jOR55pz +++ mktemp ++ local LAST_ERR=/tmp/tmp.hDV3CvOgbp ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o yaml ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.iA3jOR55pz ++ cat /tmp/tmp.hDV3CvOgbp ++ rm /tmp/tmp.iA3jOR55pz /tmp/tmp.hDV3CvOgbp ++ return 0 + '[' true == null ']' + echo + wait_cluster_consistency some-name + local cluster_name=some-name + local wait_time=32 + retry=0 + sleep 7 + echo -n 'waiting for cluster readyness' waiting for cluster readyness++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.gL2PXCGfj3 +++ mktemp ++ local LAST_ERR=/tmp/tmp.KBJjGgCiHE ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.gL2PXCGfj3 ++ cat /tmp/tmp.KBJjGgCiHE ++ rm /tmp/tmp.gL2PXCGfj3 /tmp/tmp.KBJjGgCiHE ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 1 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.L24f5N6OJ0 +++ mktemp ++ local LAST_ERR=/tmp/tmp.eGdEbti614 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.L24f5N6OJ0 ++ cat /tmp/tmp.eGdEbti614 ++ rm /tmp/tmp.L24f5N6OJ0 /tmp/tmp.eGdEbti614 ++ return 0 + [[ error == \r\e\a\d\y ]] + let retry+=1 + '[' 2 -ge 32 ']' + echo -n . 
.+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' + [[ error == \r\e\a\d\y ]] + let retry+=1 + '[' 3 -ge 32 ']' + echo -n .
.+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' + [[ error == \r\e\a\d\y ]] + let retry+=1 + '[' 4 -ge 32 ']' + echo -n .
.+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' + [[ error == \r\e\a\d\y ]] + let retry+=1 + '[' 5 -ge 32 ']' + echo -n .
.+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 6 -ge 32 ']' + echo -n .
.+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 7 -ge 32 ']' + echo -n .
.+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 8 -ge 32 ']' + echo -n .
.+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 9 -ge 32 ']' + echo -n .
.+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 10 -ge 32 ']' + echo -n .
.+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 11 -ge 32 ']' + echo -n .
.+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 12 -ge 32 ']' + echo -n .
.+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 13 -ge 32 ']' + echo -n .
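-----------------------------------------------------------------------------------
sketch: the wait_cluster_consistency readiness poll
-----------------------------------------------------------------------------------
The wait traced above is a plain poll: read .status.state through a jsonpath query every 10 seconds and give up after 32 attempts (wait_time=32). Passing through initializing and error before ready is expected here, since the restored replica set has to re-elect a primary while the operator reconciles. A condensed sketch of the loop under those same numbers; wait_cluster_ready is a hypothetical name:

# Poll a psmdb resource until .status.state reports "ready";
# allow up to 32 polls, 10 seconds apart, as in the trace.
wait_cluster_ready() {
    local cluster=$1 retry=0 state=""
    echo -n 'waiting for cluster readiness'
    sleep 7
    while true; do
        state=$(kubectl get psmdb "$cluster" -o 'jsonpath={.status.state}')
        [ "$state" = "ready" ] && break
        retry=$((retry + 1))
        if [ "$retry" -ge 32 ]; then
            echo " timeout: last state was $state"
            return 1
        fi
        echo -n .
        sleep 10
    done
    echo
}

Note the design choice visible in the trace: intermediate error states are not fatal; only exhausting the retry budget fails the wait.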
.+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.REtWScr4O2 +++ mktemp ++ local LAST_ERR=/tmp/tmp.m2ucn4hI68 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.REtWScr4O2 ++ cat /tmp/tmp.m2ucn4hI68 ++ rm /tmp/tmp.REtWScr4O2 /tmp/tmp.m2ucn4hI68 ++ return 0 + [[ ready == \r\e\a\d\y ]] + echo + wait_for_pbm_operations some-name + local cluster=some-name + set +o xtrace waiting for PBM operation to finish.. + [[ true == true ]] + compare_mongo_cmd find myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-incremental-21272 + local command=find + local uri=myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-incremental-21272 + local postfix= + local suffix= + local database=myApp + local collection=test + local sort= + local 'full_command=db.test.find()' + [[ ! -z '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-05-21T23:24:35+0000] running db.test.find() in myApp + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-incremental-21272 mongodb '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-incremental-21272 + local driver=mongodb + local suffix=.svc.cluster.local + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Jp1eZ7UOgh +++ mktemp ++ local LAST_ERR=/tmp/tmp.RcZzULbREy ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Jp1eZ7UOgh ++ cat /tmp/tmp.RcZzULbREy ++ rm /tmp/tmp.Jp1eZ7UOgh /tmp/tmp.RcZzULbREy ++ return 0 + local client_container=psmdb-client-b9788d8bc-fgxc6 + local mongo_flag= + [[ myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-incremental-21272 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-b9788d8bc-fgxc6 -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-incremental-21272.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.W6kFeKzDRL ++ mktemp + local LAST_ERR=/tmp/tmp.axcHsPDdVh + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-b9788d8bc-fgxc6 -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-incremental-21272.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.W6kFeKzDRL + cat /tmp/tmp.axcHsPDdVh + rm /tmp/tmp.W6kFeKzDRL /tmp/tmp.axcHsPDdVh + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1938/e2e-tests/demand-backup-incremental/compare/find.json /tmp/tmp.SERsePjeEv/find + compare_mongo_cmd find 
myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-incremental-21272 + local command=find + local uri=myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-incremental-21272 + local postfix= + local suffix= + local database=myApp + local collection=test + local sort= + local 'full_command=db.test.find()' + [[ ! -z '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-05-21T23:24:38+0000] running db.test.find() in myApp + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-incremental-21272 mongodb '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-incremental-21272 + local driver=mongodb + local suffix=.svc.cluster.local + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.cSBcikUNvL +++ mktemp ++ local LAST_ERR=/tmp/tmp.5yQxtutZNY ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.cSBcikUNvL ++ cat /tmp/tmp.5yQxtutZNY ++ rm /tmp/tmp.cSBcikUNvL /tmp/tmp.5yQxtutZNY ++ return 0 + local client_container=psmdb-client-b9788d8bc-fgxc6 + local mongo_flag= + [[ myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-incremental-21272 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-b9788d8bc-fgxc6 -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-incremental-21272.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.DKNn7sDPDM ++ mktemp + local LAST_ERR=/tmp/tmp.T0TFYSQ1wR + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-b9788d8bc-fgxc6 -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-incremental-21272.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.DKNn7sDPDM + cat /tmp/tmp.T0TFYSQ1wR + rm /tmp/tmp.DKNn7sDPDM /tmp/tmp.T0TFYSQ1wR + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1938/e2e-tests/demand-backup-incremental/compare/find.json /tmp/tmp.SERsePjeEv/find + compare_mongo_cmd find myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-incremental-21272 + local command=find + local uri=myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-incremental-21272 + local postfix= + local suffix= + local database=myApp + local collection=test + local sort= + local 'full_command=db.test.find()' + [[ ! 
-z '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-05-21T23:24:41+0000] running db.test.find() in myApp + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-incremental-21272 mongodb '' + local 'command=use myApp\n db.test.find()' + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + local uri=myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-incremental-21272 + local driver=mongodb + local suffix=.svc.cluster.local + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.nc5ZEgQ1Oe +++ mktemp ++ local LAST_ERR=/tmp/tmp.MarBH0E7BT ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.nc5ZEgQ1Oe ++ cat /tmp/tmp.MarBH0E7BT ++ rm /tmp/tmp.nc5ZEgQ1Oe /tmp/tmp.MarBH0E7BT ++ return 0 + local client_container=psmdb-client-b9788d8bc-fgxc6 + local mongo_flag= + [[ myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-incremental-21272 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-b9788d8bc-fgxc6 -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-incremental-21272.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.cz2iVQ1IeY ++ mktemp + local LAST_ERR=/tmp/tmp.SW1KnMHHd9 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-b9788d8bc-fgxc6 -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-incremental-21272.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.cz2iVQ1IeY + cat /tmp/tmp.SW1KnMHHd9 + rm /tmp/tmp.cz2iVQ1IeY /tmp/tmp.SW1KnMHHd9 + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1938/e2e-tests/demand-backup-incremental/compare/find.json /tmp/tmp.SERsePjeEv/find + run_restore backup-azure-blob + local backup_name=backup-azure-blob + log 'drop collection' + set +o xtrace [2025-05-21T23:24:44+0000] drop collection + run_mongo 'use myApp\n db.test.drop()' myApp:myPass@some-name-rs0.demand-backup-incremental-21272 + local 'command=use myApp\n db.test.drop()' + local uri=myApp:myPass@some-name-rs0.demand-backup-incremental-21272 + local driver=mongodb+srv + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.I0p1VyXIJu +++ mktemp ++ local LAST_ERR=/tmp/tmp.6CQJwaEhtk ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.I0p1VyXIJu ++ cat /tmp/tmp.6CQJwaEhtk ++ rm /tmp/tmp.I0p1VyXIJu /tmp/tmp.6CQJwaEhtk ++ return 0 + local client_container=psmdb-client-b9788d8bc-fgxc6 + local mongo_flag= + [[ 
myApp:myPass@some-name-rs0.demand-backup-incremental-21272 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-b9788d8bc-fgxc6 -- bash -c 'printf '\''use myApp\n db.test.drop()\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.demand-backup-incremental-21272.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.JkWSWIwSEx ++ mktemp + local LAST_ERR=/tmp/tmp.bFWivb4Qoh + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-b9788d8bc-fgxc6 -- bash -c 'printf '\''use myApp\n db.test.drop()\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.demand-backup-incremental-21272.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.JkWSWIwSEx Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-rs0-2.some-name-rs0.demand-backup-incremental-21272.svc.cluster.local:27017,some-name-rs0-1.some-name-rs0.demand-backup-incremental-21272.svc.cluster.local:27017,some-name-rs0-0.some-name-rs0.demand-backup-incremental-21272.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("1243f5f9-af62-417d-920a-34f7e3f3f2be") } Percona Server for MongoDB server version: v7.0.18-11 WARNING: shell and server versions do not match switched to db myApp true bye + cat /tmp/tmp.bFWivb4Qoh + rm /tmp/tmp.JkWSWIwSEx /tmp/tmp.bFWivb4Qoh + return 0 + log 'check backup and restore -- backup-azure-blob' + set +o xtrace [2025-05-21T23:24:47+0000] check backup and restore -- backup-azure-blob + /usr/bin/sed -e 's/backupName:/backupName: backup-azure-blob/' + yq /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1938/e2e-tests/demand-backup-incremental/conf/restore.yml + /usr/bin/sed -e 's/name:/name: restore-backup-azure-blob/' + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.ZG46dKolGu ++ mktemp + local LAST_ERR=/tmp/tmp.sizqDg06GF + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.ZG46dKolGu perconaservermongodbrestore.psmdb.percona.com/restore-backup-azure-blob created + cat /tmp/tmp.sizqDg06GF + rm /tmp/tmp.ZG46dKolGu /tmp/tmp.sizqDg06GF + return 0 + run_recovery_check backup-azure-blob + local backup_name=backup-azure-blob + local compare_suffix=_restore + local base=true + wait_restore backup-azure-blob some-name requested 0 3000 + local backup_name=backup-azure-blob + local cluster_name=some-name + local target_state=requested + local wait_cluster_consistency=0 + local wait_time=3000 + local ok_if_ready=0 + set +o xtrace Waiting for the psmdb-restore/restore-backup-azure-blob object to be createdOK Waiting psmdb-restore/restore-backup-azure-blob to reach state "requested" ..OK + [[ 0 -eq 1 ]] + echo + compare_kubectl statefulset/some-name-rs0 _restore + local resource=statefulset/some-name-rs0 + local postfix=_restore + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1938/e2e-tests/demand-backup-incremental/compare/statefulset_some-name-rs0_restore.yml + local new_result=/tmp/tmp.SERsePjeEv/statefulset_some-name-rs0.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1938/e2e-tests/demand-backup-incremental/compare/statefulset_some-name-rs0_restore-oc.yml ']' + kubectl_bin get -o yaml statefulset/some-name-rs0 + yq eval ' 
del(.metadata.ownerReferences[].apiVersion) |
del(.metadata.managedFields) |
del(.. | select(has("creationTimestamp")).creationTimestamp) |
del(.. | select(has("namespace")).namespace) |
del(.. | select(has("uid")).uid) |
del(.metadata.resourceVersion) |
del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) |
del(.metadata.selfLink) |
del(.metadata.annotations."cloud.google.com/neg") |
del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") |
del(.. | select(has("image")).image) |
del(.. | select(has("clusterIP")).clusterIP) |
del(.. | select(has("clusterIPs")).clusterIPs) |
del(.. | select(has("dataSource")).dataSource) |
del(.. | select(has("procMount")).procMount) |
del(.. | select(has("storageClassName")).storageClassName) |
del(.. | select(has("finalizers")).finalizers) |
del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") |
del(.. | select(has("volumeName")).volumeName) |
del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") |
del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") |
del(.spec.volumeMode) |
del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") |
del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") |
del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") |
del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") |
del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") |
del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) |
del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) |
del(.. | select(has("nodePort")).nodePort) |
del(.status) |
(.. | select(tag == "!!str")) |= sub("demand-backup-incremental-21272", "NAME_SPACE") |
del(.spec.volumeClaimTemplates[].apiVersion) |
del(.spec.volumeClaimTemplates[].kind) |
del(.spec.ipFamilies) |
del(.spec.ipFamilyPolicy) |
(.. | select(. == "extensions/v1beta1")) = "apps/v1" |
(.. | select(.
== "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.4qsFraA1dy ++ mktemp + local LAST_ERR=/tmp/tmp.AjKscEM3Z6 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get -o yaml statefulset/some-name-rs0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.4qsFraA1dy + cat /tmp/tmp.AjKscEM3Z6 + rm /tmp/tmp.4qsFraA1dy /tmp/tmp.AjKscEM3Z6 + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.SERsePjeEv/statefulset_some-name-rs0.yml + version_gt 1.22 ++ echo '1.30 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.SERsePjeEv/statefulset_some-name-rs0.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.SERsePjeEv/statefulset_some-name-rs0.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1938/e2e-tests/demand-backup-incremental/compare/statefulset_some-name-rs0_restore.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1938/e2e-tests/demand-backup-incremental/compare/statefulset_some-name-rs0_restore.yml /tmp/tmp.SERsePjeEv/statefulset_some-name-rs0.yml + wait_restore backup-azure-blob some-name ready 0 1800 + local backup_name=backup-azure-blob + local cluster_name=some-name + local target_state=ready + local wait_cluster_consistency=0 + local wait_time=1800 + local ok_if_ready=0 + set +o xtrace Waiting for the psmdb-restore/restore-backup-azure-blob object to be createdOK Waiting psmdb-restore/restore-backup-azure-blob to reach state "ready" .OK + [[ 0 -eq 1 ]] ++ yq '.metadata.annotations."percona.com/resync-pbm"' ++ kubectl_bin get psmdb some-name -o yaml +++ mktemp ++ local LAST_OUT=/tmp/tmp.HHbTBoLbvE +++ mktemp ++ local LAST_ERR=/tmp/tmp.xvOQIDgmhr ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o yaml ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.HHbTBoLbvE ++ cat /tmp/tmp.xvOQIDgmhr ++ rm /tmp/tmp.HHbTBoLbvE /tmp/tmp.xvOQIDgmhr ++ return 0 + '[' true == null ']' + echo + wait_cluster_consistency some-name + local cluster_name=some-name + local wait_time=32 + retry=0 + sleep 7 + echo -n 'waiting for cluster readyness' waiting for cluster readyness++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.fCOoSGvDjO +++ mktemp ++ local LAST_ERR=/tmp/tmp.m73v9rkBZX ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.fCOoSGvDjO ++ cat /tmp/tmp.m73v9rkBZX ++ rm /tmp/tmp.fCOoSGvDjO /tmp/tmp.m73v9rkBZX ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 1 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.0gNgnaTe1i +++ mktemp ++ local LAST_ERR=/tmp/tmp.t9f9Dg0FgC ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.0gNgnaTe1i ++ cat /tmp/tmp.t9f9Dg0FgC ++ rm /tmp/tmp.0gNgnaTe1i /tmp/tmp.t9f9Dg0FgC ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 2 -ge 32 ']' + echo -n . 
.+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' + [[ error == \r\e\a\d\y ]] + let retry+=1 + '[' 3 -ge 32 ']' + echo -n .
.+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' + [[ error == \r\e\a\d\y ]] + let retry+=1 + '[' 4 -ge 32 ']' + echo -n .
.+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' + [[ error == \r\e\a\d\y ]] + let retry+=1 + '[' 5 -ge 32 ']' + echo -n .
.+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 6 -ge 32 ']' + echo -n .
.+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 7 -ge 32 ']' + echo -n .
.+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 8 -ge 32 ']' + echo -n .
.+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 9 -ge 32 ']' + echo -n .
.+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 10 -ge 32 ']' + echo -n .
.+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 11 -ge 32 ']' + echo -n .
.+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 12 -ge 32 ']' + echo -n .
.+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 13 -ge 32 ']' + echo -n .
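-----------------------------------------------------------------------------------
sketch: how run_mongo executes the find/drop statements
-----------------------------------------------------------------------------------
The data checks that follow this wait go through run_mongo: look up the psmdb-client pod by its name=psmdb-client label, kubectl exec a printf ... | mongo pipeline into it with a per-pod mongodb:// URI (the replica-set-wide drops use mongodb+srv:// instead), then normalize the shell output so it diffs cleanly against find.json: noisy banner lines are filtered out, and ObjectIds and pod ordinals are masked. A minimal sketch under those assumptions; run_mongo_sketch is a hypothetical name and the rs0 replica set is hard-coded:

# Run a mongo shell snippet inside the psmdb-client pod and normalize
# the output for golden-file comparison, as the trace does.
run_mongo_sketch() {
    local command=$1 uri=$2 pod
    pod=$(kubectl get pods --selector=name=psmdb-client \
        -o 'jsonpath={.items[].metadata.name}')
    kubectl exec "$pod" -- bash -c \
        "printf '$command\n' | mongo 'mongodb://$uri.svc.cluster.local/admin?ssl=false&replicaSet=rs0'" \
        | grep -Ev 'I NETWORK|W NETWORK|F NETWORK|Implicit session:|Percona Server for MongoDB|connecting to:|versions do not match' \
        | sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/'
}

Invoked as run_mongo_sketch 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-incremental-21272; printf inside the pod expands the \n, so the mongo shell receives the use and find statements on separate lines.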
.+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.8EY4MNfm19 +++ mktemp ++ local LAST_ERR=/tmp/tmp.cSX5uODgcy ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.8EY4MNfm19 ++ cat /tmp/tmp.cSX5uODgcy ++ rm /tmp/tmp.8EY4MNfm19 /tmp/tmp.cSX5uODgcy ++ return 0 + [[ ready == \r\e\a\d\y ]] + echo + wait_for_pbm_operations some-name + local cluster=some-name + set +o xtrace waiting for PBM operation to finish.. + [[ true == true ]] + compare_mongo_cmd find myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-incremental-21272 + local command=find + local uri=myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-incremental-21272 + local postfix= + local suffix= + local database=myApp + local collection=test + local sort= + local 'full_command=db.test.find()' + [[ ! -z '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-05-21T23:31:19+0000] running db.test.find() in myApp + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-incremental-21272 mongodb '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-incremental-21272 + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + local driver=mongodb + local suffix=.svc.cluster.local + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.3oCR8wdbJc +++ mktemp ++ local LAST_ERR=/tmp/tmp.uOQWYVGCPq ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.3oCR8wdbJc ++ cat /tmp/tmp.uOQWYVGCPq ++ rm /tmp/tmp.3oCR8wdbJc /tmp/tmp.uOQWYVGCPq ++ return 0 + local client_container=psmdb-client-b9788d8bc-fgxc6 + local mongo_flag= + [[ myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-incremental-21272 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-b9788d8bc-fgxc6 -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-incremental-21272.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.3RUnxwmQBo ++ mktemp + local LAST_ERR=/tmp/tmp.8JAvBJ9ohH + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-b9788d8bc-fgxc6 -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-incremental-21272.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.3RUnxwmQBo + cat /tmp/tmp.8JAvBJ9ohH + rm /tmp/tmp.3RUnxwmQBo /tmp/tmp.8JAvBJ9ohH + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1938/e2e-tests/demand-backup-incremental/compare/find.json /tmp/tmp.SERsePjeEv/find + compare_mongo_cmd find 
myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-incremental-21272 + local command=find + local uri=myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-incremental-21272 + local postfix= + local suffix= + local database=myApp + local collection=test + local sort= + local 'full_command=db.test.find()' + [[ ! -z '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-05-21T23:31:21+0000] running db.test.find() in myApp + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-incremental-21272 mongodb '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-incremental-21272 + local driver=mongodb + local suffix=.svc.cluster.local + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.iVCwCKX5Cg +++ mktemp ++ local LAST_ERR=/tmp/tmp.GhurKEtrMx ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.iVCwCKX5Cg ++ cat /tmp/tmp.GhurKEtrMx ++ rm /tmp/tmp.iVCwCKX5Cg /tmp/tmp.GhurKEtrMx ++ return 0 + local client_container=psmdb-client-b9788d8bc-fgxc6 + local mongo_flag= + [[ myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-incremental-21272 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-b9788d8bc-fgxc6 -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-incremental-21272.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.l4ZllQOzGO ++ mktemp + local LAST_ERR=/tmp/tmp.aSvcTS3NnM + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-b9788d8bc-fgxc6 -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-incremental-21272.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.l4ZllQOzGO + cat /tmp/tmp.aSvcTS3NnM + rm /tmp/tmp.l4ZllQOzGO /tmp/tmp.aSvcTS3NnM + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1938/e2e-tests/demand-backup-incremental/compare/find.json /tmp/tmp.SERsePjeEv/find + compare_mongo_cmd find myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-incremental-21272 + local command=find + local uri=myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-incremental-21272 + local postfix= + local suffix= + local database=myApp + local collection=test + local sort= + local 'full_command=db.test.find()' + [[ ! 
-z '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-05-21T23:31:23+0000] running db.test.find() in myApp + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-incremental-21272 mongodb '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-incremental-21272 + local driver=mongodb + local suffix=.svc.cluster.local + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ZHVtPwkW1O +++ mktemp ++ local LAST_ERR=/tmp/tmp.8JKNh7WT2I ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.ZHVtPwkW1O ++ cat /tmp/tmp.8JKNh7WT2I ++ rm /tmp/tmp.ZHVtPwkW1O /tmp/tmp.8JKNh7WT2I ++ return 0 + local client_container=psmdb-client-b9788d8bc-fgxc6 + local mongo_flag= + [[ myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-incremental-21272 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-b9788d8bc-fgxc6 -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-incremental-21272.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.0sC1xmMyZN ++ mktemp + local LAST_ERR=/tmp/tmp.JAyMO5vL0K + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-b9788d8bc-fgxc6 -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-incremental-21272.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.0sC1xmMyZN + cat /tmp/tmp.JAyMO5vL0K + rm /tmp/tmp.0sC1xmMyZN /tmp/tmp.JAyMO5vL0K + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1938/e2e-tests/demand-backup-incremental/compare/find.json /tmp/tmp.SERsePjeEv/find + run_restore backup-minio-not-base + local backup_name=backup-minio-not-base + log 'drop collection' + set +o xtrace [2025-05-21T23:31:26+0000] drop collection + run_mongo 'use myApp\n db.test.drop()' myApp:myPass@some-name-rs0.demand-backup-incremental-21272 + local 'command=use myApp\n db.test.drop()' + local uri=myApp:myPass@some-name-rs0.demand-backup-incremental-21272 + local driver=mongodb+srv + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.0uqgp3d7pI +++ mktemp ++ local LAST_ERR=/tmp/tmp.chlnwMj4hy ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.0uqgp3d7pI ++ cat /tmp/tmp.chlnwMj4hy ++ rm /tmp/tmp.0uqgp3d7pI /tmp/tmp.chlnwMj4hy ++ return 0 + local client_container=psmdb-client-b9788d8bc-fgxc6 + local mongo_flag= + [[ 
myApp:myPass@some-name-rs0.demand-backup-incremental-21272 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-b9788d8bc-fgxc6 -- bash -c 'printf '\''use myApp\n db.test.drop()\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.demand-backup-incremental-21272.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.IpPzUocKya ++ mktemp + local LAST_ERR=/tmp/tmp.JwUMZhGz8v + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-b9788d8bc-fgxc6 -- bash -c 'printf '\''use myApp\n db.test.drop()\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.demand-backup-incremental-21272.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.IpPzUocKya Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-rs0-2.some-name-rs0.demand-backup-incremental-21272.svc.cluster.local:27017,some-name-rs0-1.some-name-rs0.demand-backup-incremental-21272.svc.cluster.local:27017,some-name-rs0-0.some-name-rs0.demand-backup-incremental-21272.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("cc928755-a3a1-474f-9941-07c56abb82ba") } Percona Server for MongoDB server version: v7.0.18-11 WARNING: shell and server versions do not match switched to db myApp true bye + cat /tmp/tmp.JwUMZhGz8v + rm /tmp/tmp.IpPzUocKya /tmp/tmp.JwUMZhGz8v + return 0 + log 'check backup and restore -- backup-minio-not-base' + set +o xtrace [2025-05-21T23:31:28+0000] check backup and restore -- backup-minio-not-base + yq /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1938/e2e-tests/demand-backup-incremental/conf/restore.yml + /usr/bin/sed -e 's/backupName:/backupName: backup-minio-not-base/' + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.sVxtCK7LNG + /usr/bin/sed -e 's/name:/name: restore-backup-minio-not-base/' ++ mktemp + local LAST_ERR=/tmp/tmp.fMEQdaq2Ba + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.sVxtCK7LNG perconaservermongodbrestore.psmdb.percona.com/restore-backup-minio-not-base created + cat /tmp/tmp.fMEQdaq2Ba + rm /tmp/tmp.sVxtCK7LNG /tmp/tmp.fMEQdaq2Ba + return 0 + run_recovery_check backup-minio-not-base '' false + local backup_name=backup-minio-not-base + local compare_suffix=_restore + local base=false + wait_restore backup-minio-not-base some-name requested 0 3000 + local backup_name=backup-minio-not-base + local cluster_name=some-name + local target_state=requested + local wait_cluster_consistency=0 + local wait_time=3000 + local ok_if_ready=0 + set +o xtrace Waiting for the psmdb-restore/restore-backup-minio-not-base object to be createdOK Waiting psmdb-restore/restore-backup-minio-not-base to reach state "requested" .OK + [[ 0 -eq 1 ]] + echo + compare_kubectl statefulset/some-name-rs0 _restore + local resource=statefulset/some-name-rs0 + local postfix=_restore + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1938/e2e-tests/demand-backup-incremental/compare/statefulset_some-name-rs0_restore.yml + local new_result=/tmp/tmp.SERsePjeEv/statefulset_some-name-rs0.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1938/e2e-tests/demand-backup-incremental/compare/statefulset_some-name-rs0_restore-oc.yml ']' + kubectl_bin 
get -o yaml statefulset/some-name-rs0 + yq eval '
del(.metadata.ownerReferences[].apiVersion) |
del(.metadata.managedFields) |
del(.. | select(has("creationTimestamp")).creationTimestamp) |
del(.. | select(has("namespace")).namespace) |
del(.. | select(has("uid")).uid) |
del(.metadata.resourceVersion) |
del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) |
del(.metadata.selfLink) |
del(.metadata.annotations."cloud.google.com/neg") |
del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") |
del(.. | select(has("image")).image) |
del(.. | select(has("clusterIP")).clusterIP) |
del(.. | select(has("clusterIPs")).clusterIPs) |
del(.. | select(has("dataSource")).dataSource) |
del(.. | select(has("procMount")).procMount) |
del(.. | select(has("storageClassName")).storageClassName) |
del(.. | select(has("finalizers")).finalizers) |
del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") |
del(.. | select(has("volumeName")).volumeName) |
del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") |
del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") |
del(.spec.volumeMode) |
del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") |
del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") |
del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") |
del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") |
del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") |
del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) |
del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) |
del(.. | select(has("nodePort")).nodePort) |
del(.status) |
(.. | select(tag == "!!str")) |= sub("demand-backup-incremental-21272", "NAME_SPACE") |
del(.spec.volumeClaimTemplates[].apiVersion) |
del(.spec.volumeClaimTemplates[].kind) |
del(.spec.ipFamilies) |
del(.spec.ipFamilyPolicy) |
(.. | select(. == "extensions/v1beta1")) = "apps/v1" |
(.. | select(.
== "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.tLnQhbTxpF ++ mktemp + local LAST_ERR=/tmp/tmp.K4yd20FIRV + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get -o yaml statefulset/some-name-rs0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.tLnQhbTxpF + cat /tmp/tmp.K4yd20FIRV + rm /tmp/tmp.tLnQhbTxpF /tmp/tmp.K4yd20FIRV + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.SERsePjeEv/statefulset_some-name-rs0.yml + version_gt 1.22 ++ echo '1.30 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.SERsePjeEv/statefulset_some-name-rs0.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.SERsePjeEv/statefulset_some-name-rs0.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1938/e2e-tests/demand-backup-incremental/compare/statefulset_some-name-rs0_restore.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1938/e2e-tests/demand-backup-incremental/compare/statefulset_some-name-rs0_restore.yml /tmp/tmp.SERsePjeEv/statefulset_some-name-rs0.yml + wait_restore backup-minio-not-base some-name ready 0 1800 + local backup_name=backup-minio-not-base + local cluster_name=some-name + local target_state=ready + local wait_cluster_consistency=0 + local wait_time=1800 + local ok_if_ready=0 + set +o xtrace Waiting for the psmdb-restore/restore-backup-minio-not-base object to be createdOK Waiting psmdb-restore/restore-backup-minio-not-base to reach state "ready" .OK + [[ 0 -eq 1 ]] ++ kubectl_bin get psmdb some-name -o yaml ++ yq '.metadata.annotations."percona.com/resync-pbm"' +++ mktemp ++ local LAST_OUT=/tmp/tmp.TtUtwdbVbD +++ mktemp ++ local LAST_ERR=/tmp/tmp.waDsu3gYE1 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o yaml ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.TtUtwdbVbD ++ cat /tmp/tmp.waDsu3gYE1 ++ rm /tmp/tmp.TtUtwdbVbD /tmp/tmp.waDsu3gYE1 ++ return 0 + '[' true == null ']' + echo + wait_cluster_consistency some-name + local cluster_name=some-name + local wait_time=32 + retry=0 + sleep 7 + echo -n 'waiting for cluster readyness' waiting for cluster readyness++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.phAJV1CYT7 +++ mktemp ++ local LAST_ERR=/tmp/tmp.x5rAdI12kc ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.phAJV1CYT7 ++ cat /tmp/tmp.x5rAdI12kc ++ rm /tmp/tmp.phAJV1CYT7 /tmp/tmp.x5rAdI12kc ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 1 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.P5iW2Wnjcr +++ mktemp ++ local LAST_ERR=/tmp/tmp.8WwyYk6yz5 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.P5iW2Wnjcr ++ cat /tmp/tmp.8WwyYk6yz5 ++ rm /tmp/tmp.P5iW2Wnjcr /tmp/tmp.8WwyYk6yz5 ++ return 0 + [[ error == \r\e\a\d\y ]] + let retry+=1 + '[' 2 -ge 32 ']' + echo -n . 
.+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' + [[ error == \r\e\a\d\y ]] + let retry+=1 + '[' 3 -ge 32 ']' + echo -n .
.+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' + [[ error == \r\e\a\d\y ]] + let retry+=1 + '[' 4 -ge 32 ']' + echo -n .
.+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' + [[ error == \r\e\a\d\y ]] + let retry+=1 + '[' 5 -ge 32 ']' + echo -n .
.+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' + [[ error == \r\e\a\d\y ]] + let retry+=1 + '[' 6 -ge 32 ']' + echo -n .
.+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' + [[ error == \r\e\a\d\y ]] + let retry+=1 + '[' 7 -ge 32 ']' + echo -n .
.+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' + [[ error == \r\e\a\d\y ]] + let retry+=1 + '[' 8 -ge 32 ']' + echo -n .
.+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 9 -ge 32 ']' + echo -n .
.+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 10 -ge 32 ']' + echo -n .
.+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 11 -ge 32 ']' + echo -n .
.+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 12 -ge 32 ']' + echo -n .
.+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 13 -ge 32 ']' + echo -n .
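-----------------------------------------------------------------------------------
sketch: what wait_restore is doing behind set +o xtrace
-----------------------------------------------------------------------------------
The requested/ready waits for each restore run with xtrace disabled, so only their progress dots reach the log. From the arguments and messages, wait_restore first waits for the psmdb-restore object named restore-<backup> to exist, then polls it until its state matches the target. A plausible reconstruction; psmdb-restore is the short resource name the log itself prints, but reading the state from .status.state on the restore object is an assumption:

# Wait for a PerconaServerMongoDBRestore to appear, then to reach a
# target state ("requested" or "ready"), within a rough deadline.
wait_restore_sketch() {
    local backup_name=$1 target_state=$2 wait_time=${3:-1800}
    local name="restore-${backup_name}" deadline=$((SECONDS + wait_time))
    echo -n "Waiting for the psmdb-restore/${name} object to be created"
    until kubectl get psmdb-restore "$name" >/dev/null 2>&1; do
        [ "$SECONDS" -ge "$deadline" ] && return 1
        sleep 1
    done
    echo OK
    echo -n "Waiting psmdb-restore/${name} to reach state \"${target_state}\" "
    until [ "$(kubectl get psmdb-restore "$name" -o 'jsonpath={.status.state}')" = "$target_state" ]; do
        [ "$SECONDS" -ge "$deadline" ] && return 1
        echo -n .
        sleep 1
    done
    echo OK
}

After the restore reaches ready, the harness additionally checks the percona.com/resync-pbm annotation on the psmdb object and waits for outstanding PBM operations before comparing data, as traced above.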
.+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.EolXXw68rU +++ mktemp ++ local LAST_ERR=/tmp/tmp.Pw53OJNvDE ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.EolXXw68rU ++ cat /tmp/tmp.Pw53OJNvDE ++ rm /tmp/tmp.EolXXw68rU /tmp/tmp.Pw53OJNvDE ++ return 0 + [[ ready == \r\e\a\d\y ]] + echo + wait_for_pbm_operations some-name + local cluster=some-name + set +o xtrace waiting for PBM operation to finish.. + [[ false == true ]] + compare_mongo_cmd find myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-incremental-21272 -not-base + local command=find + local uri=myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-incremental-21272 + local postfix=-not-base + local suffix= + local database=myApp + local collection=test + local sort= + local 'full_command=db.test.find()' + [[ ! -z '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-05-21T23:37:40+0000] running db.test.find() in myApp + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-incremental-21272 mongodb '' + local 'command=use myApp\n db.test.find()' + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + local uri=myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-incremental-21272 + local driver=mongodb + local suffix=.svc.cluster.local + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.qDXBTDxhi0 +++ mktemp ++ local LAST_ERR=/tmp/tmp.6mJ4h5cS8H ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.qDXBTDxhi0 ++ cat /tmp/tmp.6mJ4h5cS8H ++ rm /tmp/tmp.qDXBTDxhi0 /tmp/tmp.6mJ4h5cS8H ++ return 0 + local client_container=psmdb-client-b9788d8bc-fgxc6 + local mongo_flag= + [[ myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-incremental-21272 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-b9788d8bc-fgxc6 -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-incremental-21272.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.XPcbeYdTYy ++ mktemp + local LAST_ERR=/tmp/tmp.vB7Cqs0NaH + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-b9788d8bc-fgxc6 -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-incremental-21272.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.XPcbeYdTYy + cat /tmp/tmp.vB7Cqs0NaH + rm /tmp/tmp.XPcbeYdTYy /tmp/tmp.vB7Cqs0NaH + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1938/e2e-tests/demand-backup-incremental/compare/find-not-base.json /tmp/tmp.SERsePjeEv/find-not-base + compare_mongo_cmd find 
myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-incremental-21272 -not-base + local command=find + local uri=myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-incremental-21272 + local postfix=-not-base + local suffix= + local database=myApp + local collection=test + local sort= + local 'full_command=db.test.find()' + [[ ! -z '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-05-21T23:37:42+0000] running db.test.find() in myApp + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-incremental-21272 mongodb '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-incremental-21272 + local driver=mongodb + local suffix=.svc.cluster.local + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' +++ mktemp ++ local LAST_OUT=/tmp/tmp.2tC1nDmRL6 +++ mktemp ++ local LAST_ERR=/tmp/tmp.E95UV6mhPj ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.2tC1nDmRL6 ++ cat /tmp/tmp.E95UV6mhPj ++ rm /tmp/tmp.2tC1nDmRL6 /tmp/tmp.E95UV6mhPj ++ return 0 + local client_container=psmdb-client-b9788d8bc-fgxc6 + local mongo_flag= + [[ myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-incremental-21272 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-b9788d8bc-fgxc6 -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-incremental-21272.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.nEYIKJsujK ++ mktemp + local LAST_ERR=/tmp/tmp.D91jCv75B1 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-b9788d8bc-fgxc6 -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-incremental-21272.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.nEYIKJsujK + cat /tmp/tmp.D91jCv75B1 + rm /tmp/tmp.nEYIKJsujK /tmp/tmp.D91jCv75B1 + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1938/e2e-tests/demand-backup-incremental/compare/find-not-base.json /tmp/tmp.SERsePjeEv/find-not-base + compare_mongo_cmd find myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-incremental-21272 -not-base + local command=find + local uri=myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-incremental-21272 + local postfix=-not-base + local suffix= + local database=myApp + local collection=test + local sort= + local 'full_command=db.test.find()' + [[ ! 
-z '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-05-21T23:37:44+0000] running db.test.find() in myApp + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-incremental-21272 mongodb '' + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-incremental-21272 + local driver=mongodb + local suffix=.svc.cluster.local + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.qalt8GNR1N +++ mktemp ++ local LAST_ERR=/tmp/tmp.Wod42gw0SY ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.qalt8GNR1N ++ cat /tmp/tmp.Wod42gw0SY ++ rm /tmp/tmp.qalt8GNR1N /tmp/tmp.Wod42gw0SY ++ return 0 + local client_container=psmdb-client-b9788d8bc-fgxc6 + local mongo_flag= + [[ myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-incremental-21272 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-b9788d8bc-fgxc6 -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-incremental-21272.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.QJDu9D9SDS ++ mktemp + local LAST_ERR=/tmp/tmp.3yoYm1HbFt + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-b9788d8bc-fgxc6 -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-incremental-21272.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.QJDu9D9SDS + cat /tmp/tmp.3yoYm1HbFt + rm /tmp/tmp.QJDu9D9SDS /tmp/tmp.3yoYm1HbFt + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1938/e2e-tests/demand-backup-incremental/compare/find-not-base.json /tmp/tmp.SERsePjeEv/find-not-base + run_restore backup-minio + local backup_name=backup-minio + log 'drop collection' + set +o xtrace [2025-05-21T23:37:46+0000] drop collection + run_mongo 'use myApp\n db.test.drop()' myApp:myPass@some-name-rs0.demand-backup-incremental-21272 + local 'command=use myApp\n db.test.drop()' + local uri=myApp:myPass@some-name-rs0.demand-backup-incremental-21272 + local driver=mongodb+srv + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.oEsWpZEUSw +++ mktemp ++ local LAST_ERR=/tmp/tmp.8IgUyObbur ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.oEsWpZEUSw ++ cat /tmp/tmp.8IgUyObbur ++ rm /tmp/tmp.oEsWpZEUSw /tmp/tmp.8IgUyObbur ++ return 0 + local client_container=psmdb-client-b9788d8bc-fgxc6 + local mongo_flag= + [[ 
myApp:myPass@some-name-rs0.demand-backup-incremental-21272 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-b9788d8bc-fgxc6 -- bash -c 'printf '\''use myApp\n db.test.drop()\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.demand-backup-incremental-21272.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.1nMCVyYXR7 ++ mktemp + local LAST_ERR=/tmp/tmp.u0fnyRcT3V + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-b9788d8bc-fgxc6 -- bash -c 'printf '\''use myApp\n db.test.drop()\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.demand-backup-incremental-21272.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.1nMCVyYXR7 Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-rs0-2.some-name-rs0.demand-backup-incremental-21272.svc.cluster.local:27017,some-name-rs0-1.some-name-rs0.demand-backup-incremental-21272.svc.cluster.local:27017,some-name-rs0-0.some-name-rs0.demand-backup-incremental-21272.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("b96603bd-b527-4c6d-bb5a-523c8f5b6bfc") } Percona Server for MongoDB server version: v7.0.18-11 WARNING: shell and server versions do not match switched to db myApp true bye + cat /tmp/tmp.u0fnyRcT3V + rm /tmp/tmp.1nMCVyYXR7 /tmp/tmp.u0fnyRcT3V + return 0 + log 'check backup and restore -- backup-minio' + set +o xtrace [2025-05-21T23:37:48+0000] check backup and restore -- backup-minio + yq /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1938/e2e-tests/demand-backup-incremental/conf/restore.yml + /usr/bin/sed -e 's/backupName:/backupName: backup-minio/' + /usr/bin/sed -e 's/name:/name: restore-backup-minio/' + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.PC6nNyLWyu ++ mktemp + local LAST_ERR=/tmp/tmp.8E5bIpF2DQ + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.PC6nNyLWyu perconaservermongodbrestore.psmdb.percona.com/restore-backup-minio created + cat /tmp/tmp.8E5bIpF2DQ + rm /tmp/tmp.PC6nNyLWyu /tmp/tmp.8E5bIpF2DQ + return 0 + run_recovery_check backup-minio + local backup_name=backup-minio + local compare_suffix=_restore + local base=true + wait_restore backup-minio some-name requested 0 3000 + local backup_name=backup-minio + local cluster_name=some-name + local target_state=requested + local wait_cluster_consistency=0 + local wait_time=3000 + local ok_if_ready=0 + set +o xtrace Waiting for the psmdb-restore/restore-backup-minio object to be createdOK Waiting psmdb-restore/restore-backup-minio to reach state "requested" .OK + [[ 0 -eq 1 ]] + echo + compare_kubectl statefulset/some-name-rs0 _restore + local resource=statefulset/some-name-rs0 + local postfix=_restore + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1938/e2e-tests/demand-backup-incremental/compare/statefulset_some-name-rs0_restore.yml + local new_result=/tmp/tmp.SERsePjeEv/statefulset_some-name-rs0.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1938/e2e-tests/demand-backup-incremental/compare/statefulset_some-name-rs0_restore-oc.yml ']' + kubectl_bin get -o yaml statefulset/some-name-rs0 + yq eval ' del(.metadata.ownerReferences[].apiVersion) | 
del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("demand-backup-incremental-21272", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. 
== "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.FRGrmltVyR ++ mktemp + local LAST_ERR=/tmp/tmp.2VTr1u4nYz + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get -o yaml statefulset/some-name-rs0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.FRGrmltVyR + cat /tmp/tmp.2VTr1u4nYz + rm /tmp/tmp.FRGrmltVyR /tmp/tmp.2VTr1u4nYz + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.SERsePjeEv/statefulset_some-name-rs0.yml + version_gt 1.22 ++ bc -l ++ echo '1.30 >= 1.22' + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.SERsePjeEv/statefulset_some-name-rs0.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.SERsePjeEv/statefulset_some-name-rs0.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1938/e2e-tests/demand-backup-incremental/compare/statefulset_some-name-rs0_restore.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1938/e2e-tests/demand-backup-incremental/compare/statefulset_some-name-rs0_restore.yml /tmp/tmp.SERsePjeEv/statefulset_some-name-rs0.yml + wait_restore backup-minio some-name ready 0 1800 + local backup_name=backup-minio + local cluster_name=some-name + local target_state=ready + local wait_cluster_consistency=0 + local wait_time=1800 + local ok_if_ready=0 + set +o xtrace Waiting for the psmdb-restore/restore-backup-minio object to be createdOK Waiting psmdb-restore/restore-backup-minio to reach state "ready" .OK + [[ 0 -eq 1 ]] ++ kubectl_bin get psmdb some-name -o yaml ++ yq '.metadata.annotations."percona.com/resync-pbm"' +++ mktemp ++ local LAST_OUT=/tmp/tmp.fJgoHNdLQU +++ mktemp ++ local LAST_ERR=/tmp/tmp.VSPR6nhDzY ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o yaml ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.fJgoHNdLQU ++ cat /tmp/tmp.VSPR6nhDzY ++ rm /tmp/tmp.fJgoHNdLQU /tmp/tmp.VSPR6nhDzY ++ return 0 + '[' true == null ']' + echo + wait_cluster_consistency some-name + local cluster_name=some-name + local wait_time=32 + retry=0 + sleep 7 + echo -n 'waiting for cluster readyness' waiting for cluster readyness++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.AGeHlHwI9n +++ mktemp ++ local LAST_ERR=/tmp/tmp.wkCuxnHzPe ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.AGeHlHwI9n ++ cat /tmp/tmp.wkCuxnHzPe ++ rm /tmp/tmp.AGeHlHwI9n /tmp/tmp.wkCuxnHzPe ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 1 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.V7poBGLnjR +++ mktemp ++ local LAST_ERR=/tmp/tmp.kCE9WpGO6C ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.V7poBGLnjR ++ cat /tmp/tmp.kCE9WpGO6C ++ rm /tmp/tmp.V7poBGLnjR /tmp/tmp.kCE9WpGO6C ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 2 -ge 32 ']' + echo -n . 
.+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.gcElspOyc8 +++ mktemp ++ local LAST_ERR=/tmp/tmp.CeOtZLthqi ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.gcElspOyc8 ++ cat /tmp/tmp.CeOtZLthqi ++ rm /tmp/tmp.gcElspOyc8 /tmp/tmp.CeOtZLthqi ++ return 0 + [[ error == \r\e\a\d\y ]] + let retry+=1 + '[' 3 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.OTFKCOXDyZ +++ mktemp ++ local LAST_ERR=/tmp/tmp.BTTvPR9wGo ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.OTFKCOXDyZ ++ cat /tmp/tmp.BTTvPR9wGo ++ rm /tmp/tmp.OTFKCOXDyZ /tmp/tmp.BTTvPR9wGo ++ return 0 + [[ error == \r\e\a\d\y ]] + let retry+=1 + '[' 4 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Y1hZ3wNNaT +++ mktemp ++ local LAST_ERR=/tmp/tmp.rQiZslkYRG ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Y1hZ3wNNaT ++ cat /tmp/tmp.rQiZslkYRG ++ rm /tmp/tmp.Y1hZ3wNNaT /tmp/tmp.rQiZslkYRG ++ return 0 + [[ error == \r\e\a\d\y ]] + let retry+=1 + '[' 5 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.CjarVSeM9P +++ mktemp ++ local LAST_ERR=/tmp/tmp.Pnizwy8Hk4 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.CjarVSeM9P ++ cat /tmp/tmp.Pnizwy8Hk4 ++ rm /tmp/tmp.CjarVSeM9P /tmp/tmp.Pnizwy8Hk4 ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 6 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.lEYvLLdOdl +++ mktemp ++ local LAST_ERR=/tmp/tmp.STOYwxSN34 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.lEYvLLdOdl ++ cat /tmp/tmp.STOYwxSN34 ++ rm /tmp/tmp.lEYvLLdOdl /tmp/tmp.STOYwxSN34 ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 7 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.P59yL8BV6h +++ mktemp ++ local LAST_ERR=/tmp/tmp.YLqp50VX9E ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.P59yL8BV6h ++ cat /tmp/tmp.YLqp50VX9E ++ rm /tmp/tmp.P59yL8BV6h /tmp/tmp.YLqp50VX9E ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 8 -ge 32 ']' + echo -n . 
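# --- annotation: note the state briefly reads "error" in the iterations above
# while the replset reconfigures after the restore; the loop treats that the
# same as "initializing" and keeps polling until "ready". A sketch of
# wait_cluster_consistency reconstructed from this trace (the timeout message
# is an assumption):
wait_cluster_consistency() {
    local cluster_name=$1 wait_time=${2:-32} retry=0
    sleep 7
    echo -n 'waiting for cluster readyness'
    until [[ $(kubectl_bin get psmdb "${cluster_name}" -o 'jsonpath={.status.state}') == "ready" ]]; do
        let retry+=1
        if [ ${retry} -ge ${wait_time} ]; then
            echo "cluster ${cluster_name} did not reach ready state"
            exit 1
        fi
        echo -n .
        sleep 10
    done
    echo
}
# --- trace continues: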
.+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.qhEJrlSrkH +++ mktemp ++ local LAST_ERR=/tmp/tmp.H3eIHD4Lwc ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.qhEJrlSrkH ++ cat /tmp/tmp.H3eIHD4Lwc ++ rm /tmp/tmp.qhEJrlSrkH /tmp/tmp.H3eIHD4Lwc ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 9 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ZZathPHblM +++ mktemp ++ local LAST_ERR=/tmp/tmp.oz84Mqdcl2 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.ZZathPHblM ++ cat /tmp/tmp.oz84Mqdcl2 ++ rm /tmp/tmp.ZZathPHblM /tmp/tmp.oz84Mqdcl2 ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 10 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.1EArmpMfXl +++ mktemp ++ local LAST_ERR=/tmp/tmp.fux8jOgLaN ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.1EArmpMfXl ++ cat /tmp/tmp.fux8jOgLaN ++ rm /tmp/tmp.1EArmpMfXl /tmp/tmp.fux8jOgLaN ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 11 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.E7ITuEyCfZ +++ mktemp ++ local LAST_ERR=/tmp/tmp.1BxZe8JYd4 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.E7ITuEyCfZ ++ cat /tmp/tmp.1BxZe8JYd4 ++ rm /tmp/tmp.E7ITuEyCfZ /tmp/tmp.1BxZe8JYd4 ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 12 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.h5SoSFl8tS +++ mktemp ++ local LAST_ERR=/tmp/tmp.9YeKLzOcLC ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.h5SoSFl8tS ++ cat /tmp/tmp.9YeKLzOcLC ++ rm /tmp/tmp.h5SoSFl8tS /tmp/tmp.9YeKLzOcLC ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 13 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.cn192ilB8k +++ mktemp ++ local LAST_ERR=/tmp/tmp.3Yru4W2n45 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.cn192ilB8k ++ cat /tmp/tmp.3Yru4W2n45 ++ rm /tmp/tmp.cn192ilB8k /tmp/tmp.3Yru4W2n45 ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 14 -ge 32 ']' + echo -n . 
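# --- annotation: once the loop below reaches "ready", the harness re-verifies
# the restored data with the compare_mongo_cmd/run_mongo pair seen throughout
# this log. A sketch of that pair; tmp_dir and test_dir are assumed names for
# the /tmp/tmp.SERsePjeEv scratch dir and the test's source dir, and the egrep
# noise filter is abbreviated from the full list in the trace.
run_mongo() {
    local command=$1 uri=$2 driver=${3:-mongodb} suffix=.svc.cluster.local
    local client_container
    client_container=$(kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}')
    kubectl_bin exec "${client_container}" -- bash -c \
        "printf '${command}\n' | mongo ${driver}://${uri}${suffix}/admin?ssl=false\&replicaSet=rs0"
}
compare_mongo_cmd() {
    local command=$1 uri=$2 postfix=$3
    # mask ObjectIds and pod ordinals so the output is stable across runs,
    # then diff against the stored fixture
    run_mongo "use myApp\n db.test.${command}()" "${uri}" mongodb \
        | egrep -v 'I NETWORK|W NETWORK|F NETWORK|Implicit session:|versions do not match' \
        | /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' \
        >"${tmp_dir}/${command}${postfix}"
    diff -u "${test_dir}/compare/${command}${postfix}.json" "${tmp_dir}/${command}${postfix}"
}
# --- trace continues: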
.+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.NHjPYEUHyC +++ mktemp ++ local LAST_ERR=/tmp/tmp.VMZxT3z3U5 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.NHjPYEUHyC ++ cat /tmp/tmp.VMZxT3z3U5 ++ rm /tmp/tmp.NHjPYEUHyC /tmp/tmp.VMZxT3z3U5 ++ return 0 + [[ ready == \r\e\a\d\y ]] + echo + wait_for_pbm_operations some-name + local cluster=some-name + set +o xtrace waiting for PBM operation to finish. + [[ true == true ]] + compare_mongo_cmd find myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-incremental-21272 + local command=find + local uri=myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-incremental-21272 + local postfix= + local suffix= + local database=myApp + local collection=test + local sort= + local 'full_command=db.test.find()' + [[ ! -z '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-05-21T23:44:17+0000] running db.test.find() in myApp + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-incremental-21272 mongodb '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-incremental-21272 + local driver=mongodb + local suffix=.svc.cluster.local + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.bR7iab3wI0 +++ mktemp ++ local LAST_ERR=/tmp/tmp.8AsVRktajE ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.bR7iab3wI0 ++ cat /tmp/tmp.8AsVRktajE ++ rm /tmp/tmp.bR7iab3wI0 /tmp/tmp.8AsVRktajE ++ return 0 + local client_container=psmdb-client-b9788d8bc-fgxc6 + local mongo_flag= + [[ myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-incremental-21272 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-b9788d8bc-fgxc6 -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-incremental-21272.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.3UeTmPfOzg ++ mktemp + local LAST_ERR=/tmp/tmp.N5GOPWHxRI + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-b9788d8bc-fgxc6 -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-incremental-21272.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.3UeTmPfOzg + cat /tmp/tmp.N5GOPWHxRI + rm /tmp/tmp.3UeTmPfOzg /tmp/tmp.N5GOPWHxRI + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1938/e2e-tests/demand-backup-incremental/compare/find.json /tmp/tmp.SERsePjeEv/find + compare_mongo_cmd find 
myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-incremental-21272 + local command=find + local uri=myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-incremental-21272 + local postfix= + local suffix= + local database=myApp + local collection=test + local sort= + local 'full_command=db.test.find()' + [[ ! -z '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-05-21T23:44:19+0000] running db.test.find() in myApp + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-incremental-21272 mongodb '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-incremental-21272 + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + local driver=mongodb + local suffix=.svc.cluster.local + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.jNS13Qe9EJ +++ mktemp ++ local LAST_ERR=/tmp/tmp.n9YZuswHzg ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.jNS13Qe9EJ ++ cat /tmp/tmp.n9YZuswHzg ++ rm /tmp/tmp.jNS13Qe9EJ /tmp/tmp.n9YZuswHzg ++ return 0 + local client_container=psmdb-client-b9788d8bc-fgxc6 + local mongo_flag= + [[ myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-incremental-21272 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-b9788d8bc-fgxc6 -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-incremental-21272.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.O8NFeXIKlD ++ mktemp + local LAST_ERR=/tmp/tmp.RY42xSeSm5 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-b9788d8bc-fgxc6 -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-incremental-21272.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.O8NFeXIKlD + cat /tmp/tmp.RY42xSeSm5 + rm /tmp/tmp.O8NFeXIKlD /tmp/tmp.RY42xSeSm5 + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1938/e2e-tests/demand-backup-incremental/compare/find.json /tmp/tmp.SERsePjeEv/find + compare_mongo_cmd find myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-incremental-21272 + local command=find + local uri=myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-incremental-21272 + local postfix= + local suffix= + local database=myApp + local collection=test + local sort= + local 'full_command=db.test.find()' + [[ ! 
-z '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-05-21T23:44:21+0000] running db.test.find() in myApp + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-incremental-21272 mongodb '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-incremental-21272 + local driver=mongodb + local suffix=.svc.cluster.local + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' +++ mktemp ++ local LAST_OUT=/tmp/tmp.KWU9QYiI9D +++ mktemp ++ local LAST_ERR=/tmp/tmp.hekR2GxFxJ ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.KWU9QYiI9D ++ cat /tmp/tmp.hekR2GxFxJ ++ rm /tmp/tmp.KWU9QYiI9D /tmp/tmp.hekR2GxFxJ ++ return 0 + local client_container=psmdb-client-b9788d8bc-fgxc6 + local mongo_flag= + [[ myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-incremental-21272 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-b9788d8bc-fgxc6 -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-incremental-21272.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.DopRdnYYRi ++ mktemp + local LAST_ERR=/tmp/tmp.GGrOBcqCXu + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-b9788d8bc-fgxc6 -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-incremental-21272.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.DopRdnYYRi + cat /tmp/tmp.GGrOBcqCXu + rm /tmp/tmp.DopRdnYYRi /tmp/tmp.GGrOBcqCXu + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1938/e2e-tests/demand-backup-incremental/compare/find.json /tmp/tmp.SERsePjeEv/find + desc 'Testing with arbiter and non-voting nodes' + set +o xtrace ----------------------------------------------------------------------------------- Testing with arbiter and non-voting nodes ----------------------------------------------------------------------------------- + apply_cluster /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1938/e2e-tests/demand-backup-incremental/conf/some-name-arbiter-nv.yml + '[' -z '' ']' + cat_config /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1938/e2e-tests/demand-backup-incremental/conf/some-name-arbiter-nv.yml + kubectl_bin apply -f - + yq eval '(.spec | select(.image == null)).image = "perconalab/percona-server-mongodb-operator:main-mongod7.0"' ++ mktemp + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1938/e2e-tests/demand-backup-incremental/conf/some-name-arbiter-nv.yml + local LAST_OUT=/tmp/tmp.Fotj1q8Yxp + yq eval '.spec.upgradeOptions.apply="Never"' + yq eval '(.spec | select(has("backup"))).backup.image = "perconalab/percona-server-mongodb-operator:main-backup"' + yq eval '(.spec | select(has("pmm"))).pmm.image = 
"perconalab/pmm-client:dev-latest"' + yq eval '(.spec | select(has("initImage"))).initImage = "perconalab/percona-server-mongodb-operator:PR-1938-23826c20"' ++ mktemp + local LAST_ERR=/tmp/tmp.H8nxL41IG6 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Fotj1q8Yxp perconaservermongodb.psmdb.percona.com/some-name configured + cat /tmp/tmp.H8nxL41IG6 + rm /tmp/tmp.Fotj1q8Yxp /tmp/tmp.H8nxL41IG6 + return 0 + log 'check if all pods started' + set +o xtrace [2025-05-21T23:44:26+0000] check if all pods started + wait_for_running some-name-rs0 3 + local name=some-name-rs0 + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=some-name ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + [[ 0 -eq 2 ]] + wait_pod some-name-rs0-0 + local pod=some-name-rs0-0 + set +o xtrace waiting for pod/some-name-rs0-0 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 1 -eq 2 ]] + wait_pod some-name-rs0-1 + local pod=some-name-rs0-1 + set +o xtrace waiting for pod/some-name-rs0-1 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.lGAAIjQmBM +++ mktemp ++ local LAST_ERR=/tmp/tmp.60l1IvEUes ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.lGAAIjQmBM ++ cat /tmp/tmp.60l1IvEUes ++ rm /tmp/tmp.lGAAIjQmBM /tmp/tmp.60l1IvEUes ++ return 0 + [[ true == \t\r\u\e ]] + wait_pod some-name-rs0-arbiter-0 + local pod=some-name-rs0-arbiter-0 + set +o xtrace waiting for pod/some-name-rs0-arbiter-0 to be ready......OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.mb4XMERi9U +++ mktemp ++ local LAST_ERR=/tmp/tmp.SxcggkGjQL ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.mb4XMERi9U ++ cat /tmp/tmp.SxcggkGjQL ++ rm /tmp/tmp.mb4XMERi9U /tmp/tmp.SxcggkGjQL ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness.... 
+ wait_cluster_consistency some-name + local cluster_name=some-name + local wait_time=32 + retry=0 + sleep 7 + echo -n 'waiting for cluster readyness' waiting for cluster readyness++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.TjXdL8J31n +++ mktemp ++ local LAST_ERR=/tmp/tmp.wpl9Idr4mH ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.TjXdL8J31n ++ cat /tmp/tmp.wpl9Idr4mH ++ rm /tmp/tmp.TjXdL8J31n /tmp/tmp.wpl9Idr4mH ++ return 0 + [[ ready == \r\e\a\d\y ]] + echo + log 'running backups' + set +o xtrace [2025-05-21T23:45:10+0000] running backups + backup_name_minio=backup-minio-arbiter-nv + run_backup minio backup-minio-arbiter-nv + local storage=minio + local backup_name=backup-minio-arbiter-nv + local base=true + local backup_type=incremental + [[ true == \t\r\u\e ]] + backup_type=incremental-base + yq /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1938/e2e-tests/demand-backup-incremental/conf/backup.yml + /usr/bin/sed -e 's/name:/name: backup-minio-arbiter-nv/' + /usr/bin/sed -e 's/storageName:/storageName: minio/' + kubectl_bin apply -f - ++ mktemp + yq '.spec.type="incremental-base"' + local LAST_OUT=/tmp/tmp.2o4d0zc6L0 ++ mktemp + local LAST_ERR=/tmp/tmp.kV81vJW6Xv + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.2o4d0zc6L0 perconaservermongodbbackup.psmdb.percona.com/backup-minio-arbiter-nv created + cat /tmp/tmp.kV81vJW6Xv + rm /tmp/tmp.2o4d0zc6L0 /tmp/tmp.kV81vJW6Xv + return 0 + wait_backup backup-minio-arbiter-nv + local backup_name=backup-minio-arbiter-nv + local target_state=ready + set +o xtrace waiting for backup-minio-arbiter-nv to reach ready state...... 
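# --- annotation: run_backup above turns the generic conf/backup.yml into a
# PerconaServerMongoDBBackup CR by sed-substituting the name and storage and
# forcing .spec.type; because base=true, this backup is created as
# incremental-base (the parent that later incrementals chain onto). A sketch
# matching the pipeline in the trace; test_dir is an assumed name for the
# test's source dir.
run_backup() {
    local storage=$1 backup_name=$2 base=${3:-true}
    local backup_type=incremental
    if [[ ${base} == "true" ]]; then
        backup_type=incremental-base
    fi
    yq "${test_dir}/conf/backup.yml" \
        | /usr/bin/sed -e "s/name:/name: ${backup_name}/" \
        | /usr/bin/sed -e "s/storageName:/storageName: ${storage}/" \
        | yq ".spec.type=\"${backup_type}\"" \
        | kubectl_bin apply -f -
}
# --- trace continues: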
+ run_restore backup-minio-arbiter-nv + local backup_name=backup-minio-arbiter-nv + log 'drop collection' + set +o xtrace [2025-05-21T23:45:25+0000] drop collection + run_mongo 'use myApp\n db.test.drop()' myApp:myPass@some-name-rs0.demand-backup-incremental-21272 + local 'command=use myApp\n db.test.drop()' + local uri=myApp:myPass@some-name-rs0.demand-backup-incremental-21272 + local driver=mongodb+srv + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.wZ760BGb1n +++ mktemp ++ local LAST_ERR=/tmp/tmp.tHNx2aUygj ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.wZ760BGb1n ++ cat /tmp/tmp.tHNx2aUygj ++ rm /tmp/tmp.wZ760BGb1n /tmp/tmp.tHNx2aUygj ++ return 0 + local client_container=psmdb-client-b9788d8bc-fgxc6 + local mongo_flag= + [[ myApp:myPass@some-name-rs0.demand-backup-incremental-21272 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-b9788d8bc-fgxc6 -- bash -c 'printf '\''use myApp\n db.test.drop()\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.demand-backup-incremental-21272.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.BtL5i51xGA ++ mktemp + local LAST_ERR=/tmp/tmp.9MqAYh3Wyn + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-b9788d8bc-fgxc6 -- bash -c 'printf '\''use myApp\n db.test.drop()\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.demand-backup-incremental-21272.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.BtL5i51xGA Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-rs0-1.some-name-rs0.demand-backup-incremental-21272.svc.cluster.local:27017,some-name-rs0-nv-0.some-name-rs0.demand-backup-incremental-21272.svc.cluster.local:27017,some-name-rs0-0.some-name-rs0.demand-backup-incremental-21272.svc.cluster.local:27017,some-name-rs0-arbiter-0.some-name-rs0.demand-backup-incremental-21272.svc.cluster.local:27017,some-name-rs0-3.some-name-rs0.demand-backup-incremental-21272.svc.cluster.local:27017,some-name-rs0-2.some-name-rs0.demand-backup-incremental-21272.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("c18feb43-5728-43e3-a391-3a6a53873668") } Percona Server for MongoDB server version: v7.0.18-11 WARNING: shell and server versions do not match switched to db myApp true bye + cat /tmp/tmp.9MqAYh3Wyn + rm /tmp/tmp.BtL5i51xGA /tmp/tmp.9MqAYh3Wyn + return 0 + log 'check backup and restore -- backup-minio-arbiter-nv' + set +o xtrace [2025-05-21T23:45:28+0000] check backup and restore -- backup-minio-arbiter-nv + yq /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1938/e2e-tests/demand-backup-incremental/conf/restore.yml + /usr/bin/sed -e 's/backupName:/backupName: backup-minio-arbiter-nv/' + /usr/bin/sed -e 's/name:/name: restore-backup-minio-arbiter-nv/' + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.YlltSE5ls8 ++ mktemp + local LAST_ERR=/tmp/tmp.POFgr68o9c + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + 
cat /tmp/tmp.YlltSE5ls8 perconaservermongodbrestore.psmdb.percona.com/restore-backup-minio-arbiter-nv created + cat /tmp/tmp.POFgr68o9c + rm /tmp/tmp.YlltSE5ls8 /tmp/tmp.POFgr68o9c + return 0 + run_recovery_check backup-minio-arbiter-nv _restore-arbiter-nv + local backup_name=backup-minio-arbiter-nv + local compare_suffix=_restore-arbiter-nv + local base=true + wait_restore backup-minio-arbiter-nv some-name requested 0 3000 + local backup_name=backup-minio-arbiter-nv + local cluster_name=some-name + local target_state=requested + local wait_cluster_consistency=0 + local wait_time=3000 + local ok_if_ready=0 + set +o xtrace Waiting for the psmdb-restore/restore-backup-minio-arbiter-nv object to be createdOK Waiting psmdb-restore/restore-backup-minio-arbiter-nv to reach state "requested" ...OK + [[ 0 -eq 1 ]] + echo + compare_kubectl statefulset/some-name-rs0 _restore-arbiter-nv + local resource=statefulset/some-name-rs0 + local postfix=_restore-arbiter-nv + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1938/e2e-tests/demand-backup-incremental/compare/statefulset_some-name-rs0_restore-arbiter-nv.yml + local new_result=/tmp/tmp.SERsePjeEv/statefulset_some-name-rs0.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1938/e2e-tests/demand-backup-incremental/compare/statefulset_some-name-rs0_restore-arbiter-nv-oc.yml ']' + kubectl_bin get -o yaml statefulset/some-name-rs0 ++ mktemp + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. 
| select(tag == "!!str")) |= sub("demand-backup-incremental-21272", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. == "batch/v1beta1")) = "batch/v1" ' - + local LAST_OUT=/tmp/tmp.VyvCK60Jfh ++ mktemp + local LAST_ERR=/tmp/tmp.OoLnP1vqUg + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get -o yaml statefulset/some-name-rs0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.VyvCK60Jfh + cat /tmp/tmp.OoLnP1vqUg + rm /tmp/tmp.VyvCK60Jfh /tmp/tmp.OoLnP1vqUg + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.SERsePjeEv/statefulset_some-name-rs0.yml + version_gt 1.22 ++ echo '1.30 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.SERsePjeEv/statefulset_some-name-rs0.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.SERsePjeEv/statefulset_some-name-rs0.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1938/e2e-tests/demand-backup-incremental/compare/statefulset_some-name-rs0_restore-arbiter-nv.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1938/e2e-tests/demand-backup-incremental/compare/statefulset_some-name-rs0_restore-arbiter-nv.yml /tmp/tmp.SERsePjeEv/statefulset_some-name-rs0.yml + wait_restore backup-minio-arbiter-nv some-name ready 0 1800 + local backup_name=backup-minio-arbiter-nv + local cluster_name=some-name + local target_state=ready + local wait_cluster_consistency=0 + local wait_time=1800 + local ok_if_ready=0 + set +o xtrace Waiting for the psmdb-restore/restore-backup-minio-arbiter-nv object to be createdOK Waiting psmdb-restore/restore-backup-minio-arbiter-nv to reach state "ready" .OK + [[ 0 -eq 1 ]] ++ kubectl_bin get psmdb some-name -o yaml ++ yq '.metadata.annotations."percona.com/resync-pbm"' +++ mktemp ++ local LAST_OUT=/tmp/tmp.fkATshQhWt +++ mktemp ++ local LAST_ERR=/tmp/tmp.W6ummu9WFs ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o yaml ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.fkATshQhWt ++ cat /tmp/tmp.W6ummu9WFs ++ rm /tmp/tmp.fkATshQhWt /tmp/tmp.W6ummu9WFs ++ return 0 + '[' true == null ']' + echo + wait_cluster_consistency some-name + local cluster_name=some-name + local wait_time=32 + retry=0 + sleep 7 + echo -n 'waiting for cluster readyness' waiting for cluster readyness++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.8sobVuSHqi +++ mktemp ++ local LAST_ERR=/tmp/tmp.sTnjGoX2qg ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.8sobVuSHqi ++ cat /tmp/tmp.sTnjGoX2qg ++ rm /tmp/tmp.8sobVuSHqi /tmp/tmp.sTnjGoX2qg ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 1 -ge 32 ']' + echo -n . 
.+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Ui7pbeE8kp +++ mktemp ++ local LAST_ERR=/tmp/tmp.aJRu4Z9p72 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Ui7pbeE8kp ++ cat /tmp/tmp.aJRu4Z9p72 ++ rm /tmp/tmp.Ui7pbeE8kp /tmp/tmp.aJRu4Z9p72 ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 2 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.0F1KTjBAXM +++ mktemp ++ local LAST_ERR=/tmp/tmp.mjUrqJHMFy ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.0F1KTjBAXM ++ cat /tmp/tmp.mjUrqJHMFy ++ rm /tmp/tmp.0F1KTjBAXM /tmp/tmp.mjUrqJHMFy ++ return 0 + [[ error == \r\e\a\d\y ]] + let retry+=1 + '[' 3 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.vFwLFuNBDi +++ mktemp ++ local LAST_ERR=/tmp/tmp.Mg3x6qiycn ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.vFwLFuNBDi ++ cat /tmp/tmp.Mg3x6qiycn ++ rm /tmp/tmp.vFwLFuNBDi /tmp/tmp.Mg3x6qiycn ++ return 0 + [[ error == \r\e\a\d\y ]] + let retry+=1 + '[' 4 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ErKNsEoRg9 +++ mktemp ++ local LAST_ERR=/tmp/tmp.rZNUx1mLhb ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.ErKNsEoRg9 ++ cat /tmp/tmp.rZNUx1mLhb ++ rm /tmp/tmp.ErKNsEoRg9 /tmp/tmp.rZNUx1mLhb ++ return 0 + [[ error == \r\e\a\d\y ]] + let retry+=1 + '[' 5 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ZxC45cYsdx +++ mktemp ++ local LAST_ERR=/tmp/tmp.cj2F5egmuT ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.ZxC45cYsdx ++ cat /tmp/tmp.cj2F5egmuT ++ rm /tmp/tmp.ZxC45cYsdx /tmp/tmp.cj2F5egmuT ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 6 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.kkGLUuoBGQ +++ mktemp ++ local LAST_ERR=/tmp/tmp.CqOhynIuDS ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.kkGLUuoBGQ ++ cat /tmp/tmp.CqOhynIuDS ++ rm /tmp/tmp.kkGLUuoBGQ /tmp/tmp.CqOhynIuDS ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 7 -ge 32 ']' + echo -n . 
.+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Bo6hOaF00U +++ mktemp ++ local LAST_ERR=/tmp/tmp.VeIgBs2Dsd ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Bo6hOaF00U ++ cat /tmp/tmp.VeIgBs2Dsd ++ rm /tmp/tmp.Bo6hOaF00U /tmp/tmp.VeIgBs2Dsd ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 8 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.HVvUaPQOh0 +++ mktemp ++ local LAST_ERR=/tmp/tmp.GK1wlFYxIB ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.HVvUaPQOh0 ++ cat /tmp/tmp.GK1wlFYxIB ++ rm /tmp/tmp.HVvUaPQOh0 /tmp/tmp.GK1wlFYxIB ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 9 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.vJnjois5D2 +++ mktemp ++ local LAST_ERR=/tmp/tmp.9LhiVaIMvl ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.vJnjois5D2 ++ cat /tmp/tmp.9LhiVaIMvl ++ rm /tmp/tmp.vJnjois5D2 /tmp/tmp.9LhiVaIMvl ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 10 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.9Dk4SOinWj +++ mktemp ++ local LAST_ERR=/tmp/tmp.bFH98t8wsn ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.9Dk4SOinWj ++ cat /tmp/tmp.bFH98t8wsn ++ rm /tmp/tmp.9Dk4SOinWj /tmp/tmp.bFH98t8wsn ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 11 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.uylsNMm5H2 +++ mktemp ++ local LAST_ERR=/tmp/tmp.RVuIQd97SY ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.uylsNMm5H2 ++ cat /tmp/tmp.RVuIQd97SY ++ rm /tmp/tmp.uylsNMm5H2 /tmp/tmp.RVuIQd97SY ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 12 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.pyN7Jd5D7T +++ mktemp ++ local LAST_ERR=/tmp/tmp.cDmQnn8oxl ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.pyN7Jd5D7T ++ cat /tmp/tmp.cDmQnn8oxl ++ rm /tmp/tmp.pyN7Jd5D7T /tmp/tmp.cDmQnn8oxl ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 13 -ge 32 ']' + echo -n . 
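# --- annotation: the polling above is the tail of run_recovery_check. As this
# trace expands it, the check runs in two phases: diff the statefulset against
# the _restore-arbiter-nv fixture while the restore sits in "requested", then
# wait for "ready", for cluster consistency, and for PBM to go idle before
# re-verifying the data below. A sketch using the hard-coded names from this
# run; the argument defaults are assumptions.
run_recovery_check() {
    local backup_name=$1 compare_suffix=${2:-_restore} base=${3:-true}
    wait_restore "${backup_name}" some-name requested 0 3000
    compare_kubectl statefulset/some-name-rs0 "${compare_suffix}"
    wait_restore "${backup_name}" some-name ready 0 1800
    wait_cluster_consistency some-name
    wait_for_pbm_operations some-name
}
# --- trace continues: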
.+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.vsxo9r0S0q +++ mktemp ++ local LAST_ERR=/tmp/tmp.MgypU5TtBq ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.vsxo9r0S0q ++ cat /tmp/tmp.MgypU5TtBq ++ rm /tmp/tmp.vsxo9r0S0q /tmp/tmp.MgypU5TtBq ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 14 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.C4z3SRVz1b +++ mktemp ++ local LAST_ERR=/tmp/tmp.W7qcWOEByL ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.C4z3SRVz1b ++ cat /tmp/tmp.W7qcWOEByL ++ rm /tmp/tmp.C4z3SRVz1b /tmp/tmp.W7qcWOEByL ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 15 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.hoSgArH6tF +++ mktemp ++ local LAST_ERR=/tmp/tmp.Qcz69Rhmk4 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.hoSgArH6tF ++ cat /tmp/tmp.Qcz69Rhmk4 ++ rm /tmp/tmp.hoSgArH6tF /tmp/tmp.Qcz69Rhmk4 ++ return 0 + [[ ready == \r\e\a\d\y ]] + echo + wait_for_pbm_operations some-name + local cluster=some-name + set +o xtrace waiting for PBM operation to finish.. + [[ true == true ]] + compare_mongo_cmd find myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-incremental-21272 + local command=find + local uri=myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-incremental-21272 + local postfix= + local suffix= + local database=myApp + local collection=test + local sort= + local 'full_command=db.test.find()' + [[ ! 
-z '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-05-21T23:53:51+0000] running db.test.find() in myApp + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-incremental-21272 mongodb '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-incremental-21272 + local driver=mongodb + local suffix=.svc.cluster.local + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.bKN5RAyfAc +++ mktemp ++ local LAST_ERR=/tmp/tmp.fXPHVhPx1b ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.bKN5RAyfAc ++ cat /tmp/tmp.fXPHVhPx1b ++ rm /tmp/tmp.bKN5RAyfAc /tmp/tmp.fXPHVhPx1b ++ return 0 + local client_container=psmdb-client-b9788d8bc-fgxc6 + local mongo_flag= + [[ myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-incremental-21272 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-b9788d8bc-fgxc6 -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-incremental-21272.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.nRv47vWvC7 ++ mktemp + local LAST_ERR=/tmp/tmp.faQq2JpP64 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-b9788d8bc-fgxc6 -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-incremental-21272.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.nRv47vWvC7 + cat /tmp/tmp.faQq2JpP64 + rm /tmp/tmp.nRv47vWvC7 /tmp/tmp.faQq2JpP64 + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1938/e2e-tests/demand-backup-incremental/compare/find.json /tmp/tmp.SERsePjeEv/find + compare_mongo_cmd find myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-incremental-21272 + local command=find + local uri=myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-incremental-21272 + local postfix= + local suffix= + local database=myApp + local collection=test + local sort= + local 'full_command=db.test.find()' + [[ ! 
+ compare_mongo_cmd find myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-incremental-21272
+ local command=find
+ local uri=myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-incremental-21272
+ local postfix=
+ local suffix=
+ local database=myApp
+ local collection=test
+ local sort=
+ local 'full_command=db.test.find()'
+ [[ ! -z '' ]]
+ log 'running db.test.find() in myApp'
+ set +o xtrace
[2025-05-21T23:53:54+0000] running db.test.find() in myApp
+ run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-incremental-21272 mongodb ''
+ local 'command=use myApp\n db.test.find()'
+ egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:'
+ local uri=myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-incremental-21272
+ local driver=mongodb
+ local suffix=.svc.cluster.local
+ /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/'
++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}'
+++ mktemp
++ local LAST_OUT=/tmp/tmp.jaCePrNWe4
+++ mktemp
++ local LAST_ERR=/tmp/tmp.Hqby73Zb8E
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in '$(seq 0 2)'
++ set +e
++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}'
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 -a -n 1 ']'
++ break
++ cat /tmp/tmp.jaCePrNWe4
++ cat /tmp/tmp.Hqby73Zb8E
++ rm /tmp/tmp.jaCePrNWe4 /tmp/tmp.Hqby73Zb8E
++ return 0
+ local client_container=psmdb-client-b9788d8bc-fgxc6
+ local mongo_flag=
+ [[ myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-incremental-21272 == *cfg* ]]
+ replica_set=rs0
+ kubectl_bin exec psmdb-client-b9788d8bc-fgxc6 -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-incremental-21272.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 '
++ mktemp
+ local LAST_OUT=/tmp/tmp.ryXNJx7upH
++ mktemp
+ local LAST_ERR=/tmp/tmp.He8wktkfbd
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl exec psmdb-client-b9788d8bc-fgxc6 -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-incremental-21272.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 '
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.ryXNJx7upH
+ cat /tmp/tmp.He8wktkfbd
+ rm /tmp/tmp.ryXNJx7upH /tmp/tmp.He8wktkfbd
+ return 0
+ [[ 0 -eq 0 ]]
+ diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1938/e2e-tests/demand-backup-incremental/compare/find.json /tmp/tmp.SERsePjeEv/find
+ compare_mongo_cmd find myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-incremental-21272
+ local command=find
+ local uri=myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-incremental-21272
+ local postfix=
+ local suffix=
+ local database=myApp
+ local collection=test
+ local sort=
+ local 'full_command=db.test.find()'
+ [[ ! -z '' ]]
+ log 'running db.test.find() in myApp'
+ set +o xtrace
[2025-05-21T23:53:56+0000] running db.test.find() in myApp
+ run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-incremental-21272 mongodb ''
+ local 'command=use myApp\n db.test.find()'
+ local uri=myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-incremental-21272
+ local driver=mongodb
+ local suffix=.svc.cluster.local
+ egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:'
++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}'
+++ mktemp
++ local LAST_OUT=/tmp/tmp.pJNGCBnYTD
+++ mktemp
++ local LAST_ERR=/tmp/tmp.ZtkaYpajpW
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
+ /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/'
++ for i in '$(seq 0 2)'
++ set +e
++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}'
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 -a -n 1 ']'
++ break
++ cat /tmp/tmp.pJNGCBnYTD
++ cat /tmp/tmp.ZtkaYpajpW
++ rm /tmp/tmp.pJNGCBnYTD /tmp/tmp.ZtkaYpajpW
++ return 0
+ local client_container=psmdb-client-b9788d8bc-fgxc6
+ local mongo_flag=
+ [[ myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-incremental-21272 == *cfg* ]]
+ replica_set=rs0
+ kubectl_bin exec psmdb-client-b9788d8bc-fgxc6 -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-incremental-21272.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 '
++ mktemp
+ local LAST_OUT=/tmp/tmp.vvXbnUf7zi
++ mktemp
+ local LAST_ERR=/tmp/tmp.UPSryWRHxI
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl exec psmdb-client-b9788d8bc-fgxc6 -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-incremental-21272.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 '
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.vvXbnUf7zi
+ cat /tmp/tmp.UPSryWRHxI
+ rm /tmp/tmp.vvXbnUf7zi /tmp/tmp.UPSryWRHxI
+ return 0
+ [[ 0 -eq 0 ]]
+ diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1938/e2e-tests/demand-backup-incremental/compare/find.json /tmp/tmp.SERsePjeEv/find
+ destroy demand-backup-incremental-21272
+ local namespace=demand-backup-incremental-21272
+ local ignore_logs=true
+ desc 'destroy cluster/operator and all other resources'
+ set +o xtrace
-----------------------------------------------------------------------------------
destroy cluster/operator and all other resources
-----------------------------------------------------------------------------------
+ '[' true == false ']'
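From here the run tears everything down, and the ordering matters: backup objects go first (while their CRD still exists), then CRDs and cluster-wide RBAC, then cert-manager, then the namespaces. A rough outline of that order, assuming a wrapper name destroy_outline; the inner helper names are the ones visible in the trace:

    # Sketch only: teardown order reconstructed from the trace that follows.
    destroy_outline() {
        local namespace=$1
        delete_backups                 # remove psmdb-backup objects while their CRD still exists
        delete_crd                     # strip finalizers, then drop CRDs and cluster-wide RBAC
        destroy_cert_manager || true   # tolerated failure: cert-manager may not be installed
        kubectl delete --grace-period=0 --force=true namespace "${namespace}"
        kubectl delete --grace-period=0 --force=true namespace psmdb-operator
    }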
+ delete_backups
+ desc 'Delete psmdb-backup'
+ set +o xtrace
-----------------------------------------------------------------------------------
Delete psmdb-backup
-----------------------------------------------------------------------------------
++ wc -l
++ kubectl_bin get psmdb-backup --no-headers
+++ mktemp
++ local LAST_OUT=/tmp/tmp.rheXABx5bt
+++ mktemp
++ local LAST_ERR=/tmp/tmp.bLF0vMP8ka
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in '$(seq 0 2)'
++ set +e
++ kubectl get psmdb-backup --no-headers
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 -a -n 1 ']'
++ break
++ cat /tmp/tmp.rheXABx5bt
++ cat /tmp/tmp.bLF0vMP8ka
++ rm /tmp/tmp.rheXABx5bt /tmp/tmp.bLF0vMP8ka
++ return 0
+ '[' 6 '!=' 0 ']'
+ kubectl_bin get psmdb-backup
++ mktemp
+ local LAST_OUT=/tmp/tmp.LyiJxT2KuZ
++ mktemp
+ local LAST_ERR=/tmp/tmp.MDdqRjiH8X
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl get psmdb-backup
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.LyiJxT2KuZ
NAME                      CLUSTER     STORAGE      DESTINATION                                                                      TYPE               STATUS   COMPLETED   AGE
backup-aws-s3             some-name   aws-s3       s3://operator-testing/psmdb-demand-backup-incremental/2025-05-21T23:09:09Z      incremental-base   ready    44m         44m
backup-azure-blob         some-name   azure-blob   azure://operator-testing/psmdb-demand-backup-incremental/2025-05-21T23:09:30Z   incremental-base   ready    44m         44m
backup-gcp-cs             some-name   gcp-cs       s3://operator-testing/psmdb-demand-backup-incremental/2025-05-21T23:09:47Z      incremental-base   ready    43m         44m
backup-minio              some-name   minio        s3://operator-testing/2025-05-21T23:10:16Z                                       incremental-base   ready    43m         43m
backup-minio-arbiter-nv   some-name   minio        s3://operator-testing/2025-05-21T23:45:12Z                                       incremental-base   ready    8m41s       8m50s
backup-minio-not-base     some-name   minio        s3://operator-testing/2025-05-21T23:10:48Z                                       incremental        ready    43m         43m
+ cat /tmp/tmp.MDdqRjiH8X
+ rm /tmp/tmp.LyiJxT2KuZ /tmp/tmp.MDdqRjiH8X
+ return 0
+ kubectl_bin delete psmdb-backup --all
++ mktemp
+ local LAST_OUT=/tmp/tmp.7LErIqESOk
++ mktemp
+ local LAST_ERR=/tmp/tmp.R1tvqRyRWS
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl delete psmdb-backup --all
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.7LErIqESOk
perconaservermongodbbackup.psmdb.percona.com "backup-aws-s3" deleted
perconaservermongodbbackup.psmdb.percona.com "backup-azure-blob" deleted
perconaservermongodbbackup.psmdb.percona.com "backup-gcp-cs" deleted
perconaservermongodbbackup.psmdb.percona.com "backup-minio" deleted
perconaservermongodbbackup.psmdb.percona.com "backup-minio-arbiter-nv" deleted
perconaservermongodbbackup.psmdb.percona.com "backup-minio-not-base" deleted
+ cat /tmp/tmp.R1tvqRyRWS
+ rm /tmp/tmp.7LErIqESOk /tmp/tmp.R1tvqRyRWS
+ return 0
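delete_backups above counts the psmdb-backup objects first and only lists and deletes when the count is non-zero (here: 6). A minimal sketch of that count-then-delete logic; the function name is illustrative:

    # Sketch only: delete all psmdb-backup objects if any exist.
    delete_all_backups() {
        local count
        count=$(kubectl get psmdb-backup --no-headers | wc -l)
        if [ "${count}" != 0 ]; then
            kubectl get psmdb-backup            # log what is about to be removed
            kubectl delete psmdb-backup --all
        fi
    }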
+ delete_crd
+ desc 'get and delete old CRDs and RBAC'
+ set +o xtrace
-----------------------------------------------------------------------------------
get and delete old CRDs and RBAC
-----------------------------------------------------------------------------------
+ kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1938/deploy/crd.yaml --ignore-not-found --wait=false
++ mktemp
+ local LAST_OUT=/tmp/tmp.tQw6dndbAF
++ mktemp
+ local LAST_ERR=/tmp/tmp.KUBCcVhgR4
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1938/deploy/crd.yaml --ignore-not-found --wait=false
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.tQw6dndbAF
customresourcedefinition.apiextensions.k8s.io "perconaservermongodbbackups.psmdb.percona.com" deleted
customresourcedefinition.apiextensions.k8s.io "perconaservermongodbrestores.psmdb.percona.com" deleted
customresourcedefinition.apiextensions.k8s.io "perconaservermongodbs.psmdb.percona.com" deleted
+ cat /tmp/tmp.KUBCcVhgR4
+ rm /tmp/tmp.tQw6dndbAF /tmp/tmp.KUBCcVhgR4
+ return 0
++ yq eval .metadata.name /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1938/deploy/crd.yaml
++ grep -v '\-\-\-'
+ for crd_name in '$(yq eval '\''.metadata.name'\'' "${src_dir}/deploy/crd.yaml" | grep -v '\''\-\-\-'\'')'
+ kubectl get perconaservermongodbbackups.psmdb.percona.com --all-namespaces -o wide
+ grep -v NAMESPACE
+ xargs -L 1 sh -xc 'kubectl patch perconaservermongodbbackups.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"'
error: the server doesn't have a resource type "perconaservermongodbbackups"
+ kubectl patch perconaservermongodbbackups.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}'
error: the server doesn't have a resource type "perconaservermongodbbackups"
+ :
+ kubectl_bin wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com
++ mktemp
+ local LAST_OUT=/tmp/tmp.pgR66dnoNi
++ mktemp
+ local LAST_ERR=/tmp/tmp.ayA2iv7Ets
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.pgR66dnoNi
+ cat /tmp/tmp.ayA2iv7Ets
+ rm /tmp/tmp.pgR66dnoNi /tmp/tmp.ayA2iv7Ets
+ return 0
+ for crd_name in '$(yq eval '\''.metadata.name'\'' "${src_dir}/deploy/crd.yaml" | grep -v '\''\-\-\-'\'')'
+ kubectl get perconaservermongodbrestores.psmdb.percona.com --all-namespaces -o wide
+ grep -v NAMESPACE
+ xargs -L 1 sh -xc 'kubectl patch perconaservermongodbrestores.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"'
error: the server doesn't have a resource type "perconaservermongodbrestores"
+ kubectl patch perconaservermongodbrestores.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}'
error: the server doesn't have a resource type "perconaservermongodbrestores"
+ :
+ kubectl_bin wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com
++ mktemp
+ local LAST_OUT=/tmp/tmp.RRmRa46nis
++ mktemp
+ local LAST_ERR=/tmp/tmp.gGUgTEpsfM
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.RRmRa46nis
+ cat /tmp/tmp.gGUgTEpsfM
+ rm /tmp/tmp.RRmRa46nis /tmp/tmp.gGUgTEpsfM
+ return 0
+ for crd_name in '$(yq eval '\''.metadata.name'\'' "${src_dir}/deploy/crd.yaml" | grep -v '\''\-\-\-'\'')'
+ kubectl get perconaservermongodbs.psmdb.percona.com --all-namespaces -o wide
+ grep -v NAMESPACE
+ xargs -L 1 sh -xc 'kubectl patch perconaservermongodbs.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"'
No resources found
+ kubectl patch perconaservermongodbs.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}'
error: resource(s) were provided, but no name was specified
+ :
+ kubectl_bin wait --for=delete crd perconaservermongodbs.psmdb.percona.com
++ mktemp
+ local LAST_OUT=/tmp/tmp.YqNJV9O1D3
++ mktemp
+ local LAST_ERR=/tmp/tmp.E0keSCqA3J
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl wait --for=delete crd perconaservermongodbs.psmdb.percona.com
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.YqNJV9O1D3
customresourcedefinition.apiextensions.k8s.io/perconaservermongodbs.psmdb.percona.com condition met
+ cat /tmp/tmp.E0keSCqA3J
+ rm /tmp/tmp.YqNJV9O1D3 /tmp/tmp.E0keSCqA3J
+ return 0
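The per-CRD loop above clears metadata.finalizers on every remaining object before waiting for the CRD itself to disappear, so a stuck finalizer cannot block deletion; every step tolerates "already gone" errors. The stray "-n sh" patch attempts come from xargs running the command once even on empty input, where $0 of sh -c falls back to "sh"; passing -r to xargs would avoid that. A sketch of the pattern (the --timeout value is an assumption, the patch payload is the one from the trace):

    # Sketch only: strip finalizers from all objects of a CRD, then wait it out.
    strip_finalizers_and_wait() {
        local crd=$1   # e.g. perconaservermongodbbackups.psmdb.percona.com
        kubectl get "${crd}" --all-namespaces -o wide \
            | grep -v NAMESPACE \
            | xargs -r -L 1 sh -xc "kubectl patch ${crd} -n \$0 \$1 --type=merge -p '{\"metadata\":{\"finalizers\":[]}}'" \
            || true    # the CRD may already be gone
        kubectl wait --for=delete crd "${crd}" --timeout=60s || true
    }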
+ local rbac_yaml=rbac.yaml
+ '[' -n psmdb-operator ']'
+ rbac_yaml=cw-rbac.yaml
+ kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1938/deploy/cw-rbac.yaml --ignore-not-found
++ mktemp
+ local LAST_OUT=/tmp/tmp.lp1MfSVZer
++ mktemp
+ local LAST_ERR=/tmp/tmp.3g3hHgCgWD
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1938/deploy/cw-rbac.yaml --ignore-not-found
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.lp1MfSVZer
clusterrole.rbac.authorization.k8s.io "percona-server-mongodb-operator" deleted
clusterrolebinding.rbac.authorization.k8s.io "service-account-percona-server-mongodb-operator" deleted
+ cat /tmp/tmp.3g3hHgCgWD
+ rm /tmp/tmp.lp1MfSVZer /tmp/tmp.3g3hHgCgWD
+ return 0
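The rbac_yaml switch above picks the cluster-wide manifest (cw-rbac.yaml) because the operator is deployed in its own namespace. A sketch of that selection; the OPERATOR_NS variable name is an assumption:

    # Sketch only: choose namespaced vs. cluster-wide RBAC manifest.
    rbac_yaml=rbac.yaml
    if [ -n "${OPERATOR_NS:-}" ]; then   # assumed variable carrying the operator namespace
        rbac_yaml=cw-rbac.yaml
    fi
    kubectl delete -f "deploy/${rbac_yaml}" --ignore-not-found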
"https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cluster-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-edit" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": 
clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": services "cert-manager-cainjector" not found Error from server (NotFound): error 
when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": services "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": services "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": deployments.apps "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": deployments.apps "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": deployments.apps "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": mutatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": validatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found + sleep 0 + for i in '$(seq 0 2)' + set +e + kubectl delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.LYNDTWDBO5 + cat /tmp/tmp.HvtYKyKizf Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": namespaces "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificaterequests.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificates.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "challenges.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "clusterissuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "issuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "orders.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": serviceaccounts "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": serviceaccounts "cert-manager" 
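All kubectl_bin calls in this log share one retry wrapper: capture stdout/stderr into mktemp files, try up to three times, print both streams after each attempt, and back off between failures (the observed sleeps of 0s, 4s, 8s suggest timeout * attempt). A reconstruction under those assumptions; the wrapper name and back-off formula are inferred, not taken from the suite:

    # Sketch only: kubectl_bin-style retry wrapper reconstructed from the trace.
    kubectl_retry() {
        local out err exit_status=0 timeout=4 i
        out=$(mktemp)
        err=$(mktemp)
        for i in $(seq 0 2); do
            set +e
            kubectl "$@" >"${out}" 2>"${err}"
            exit_status=$?
            set -e
            cat "${out}"
            cat "${err}"
            if [ "${exit_status}" -eq 0 ]; then
                break
            fi
            sleep $((timeout * i))   # observed back-off: 0s, 4s, 8s
        done
        rm "${out}" "${err}"
        return "${exit_status}"
    }

The "+ true" that follows the final "+ return 1" further down shows the caller deliberately swallows this failure, which is why a missing cert-manager does not fail the teardown.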
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml
+ exit_status=1
+ set -e
+ '[' 1 '!=' 0 -a -n 1 ']'
+ cat /tmp/tmp.LYNDTWDBO5
+ cat /tmp/tmp.HvtYKyKizf
[same 49 "Error from server (NotFound)" lines as the first attempt]
"cert-manager-cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": services "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": services "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": services "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": deployments.apps "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": deployments.apps "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": deployments.apps "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": mutatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": validatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found + sleep 4 + for i in '$(seq 0 2)' + set +e + kubectl delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.LYNDTWDBO5 + cat /tmp/tmp.HvtYKyKizf Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": namespaces "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificaterequests.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificates.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "challenges.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "clusterissuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "issuers.cert-manager.io" not found Error from server (NotFound): error when deleting 
"https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "orders.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": serviceaccounts "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": serviceaccounts "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": serviceaccounts "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cluster-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-edit" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting 
"https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting 
"https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": services "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": services "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": services "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": deployments.apps "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": deployments.apps "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": deployments.apps "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": mutatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": validatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found + sleep 8 + cat /tmp/tmp.LYNDTWDBO5 + cat /tmp/tmp.HvtYKyKizf Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": namespaces "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificaterequests.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificates.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "challenges.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": 
"cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": services "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": services "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": services "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": deployments.apps "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": deployments.apps "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": deployments.apps "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": mutatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": validatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found + rm /tmp/tmp.LYNDTWDBO5 /tmp/tmp.HvtYKyKizf + return 1 + true + '[' -n '' ']' + '[' -n psmdb-operator ']' + kubectl_bin delete --grace-period=0 --force=true namespace demand-backup-incremental-21272 + rm -rf /tmp/tmp.SERsePjeEv ++ mktemp + local LAST_OUT=/tmp/tmp.ynP1tFs45A + kubectl_bin delete --grace-period=0 --force=true namespace psmdb-operator ++ mktemp ++ mktemp + local LAST_OUT=/tmp/tmp.kNYYI1mpw0 ++ mktemp + local LAST_ERR=/tmp/tmp.DEH3MBsPjV + local exit_status=0 + local timeout=4 ++ seq 0 2 + local LAST_ERR=/tmp/tmp.mKz5MaryGV + local exit_status=0 + local timeout=4 + for i in '$(seq 0 2)' + set +e + kubectl delete --grace-period=0 --force=true namespace psmdb-operator ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete --grace-period=0 --force=true namespace 
demand-backup-incremental-21272
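The interleaved trace above (two mktemp/LAST_OUT sequences mixed together) indicates the two namespace deletions run concurrently. A sketch of that final step, reconstructed as background jobs:

    # Sketch only: force-delete both namespaces in parallel and wait for both.
    kubectl delete --grace-period=0 --force=true namespace demand-backup-incremental-21272 &
    kubectl delete --grace-period=0 --force=true namespace psmdb-operator &
    wait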