Log: /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1897/e2e-tests/logs/demand-backup-fs.log WARNING: version difference between client (1.33) and server (1.30) exceeds the supported minor version skew of +/-1 WARNING: version difference between client (1.33) and server (1.30) exceeds the supported minor version skew of +/-1 WARNING: version difference between client (1.33) and server (1.30) exceeds the supported minor version skew of +/-1 + [[ -n '' ]] + cluster=some-name + create_infra demand-backup-fs-18803 + local ns=demand-backup-fs-18803 + [[ 1 == 1 ]] + delete_crd + desc 'get and delete old CRDs and RBAC' + set +o xtrace ----------------------------------------------------------------------------------- get and delete old CRDs and RBAC ----------------------------------------------------------------------------------- + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1897/deploy/crd.yaml --ignore-not-found --wait=false ++ mktemp + local LAST_OUT=/tmp/tmp.poL1sbvQL9 ++ mktemp + local LAST_ERR=/tmp/tmp.dM1thL2ZLl + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1897/deploy/crd.yaml --ignore-not-found --wait=false + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.poL1sbvQL9 customresourcedefinition.apiextensions.k8s.io "perconaservermongodbbackups.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbrestores.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbs.psmdb.percona.com" deleted + cat /tmp/tmp.dM1thL2ZLl + rm /tmp/tmp.poL1sbvQL9 /tmp/tmp.dM1thL2ZLl + return 0 ++ yq eval .metadata.name /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1897/deploy/crd.yaml ++ grep -v '\-\-\-' + for crd_name in '$(yq eval '\''.metadata.name'\'' "${src_dir}/deploy/crd.yaml" | grep -v '\''\-\-\-'\'')' + kubectl get perconaservermongodbbackups.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbbackups.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' + kubectl patch perconaservermongodbbackups.psmdb.percona.com -n demand-backup-fs-27269 backup-nfs-logical-pitr --type=merge -p '{"metadata":{"finalizers":[]}}' perconaservermongodbbackup.psmdb.percona.com/backup-nfs-logical-pitr patched + kubectl patch perconaservermongodbbackups.psmdb.percona.com -n demand-backup-fs-27269 backup-nfs-physical --type=merge -p '{"metadata":{"finalizers":[]}}' Error from server (NotFound): perconaservermongodbbackups.psmdb.percona.com "backup-nfs-physical" not found + kubectl patch perconaservermongodbbackups.psmdb.percona.com -n demand-backup-fs-27269 backup-nfs-physical-pitr --type=merge -p '{"metadata":{"finalizers":[]}}' Error from server (NotFound): perconaservermongodbbackups.psmdb.percona.com "backup-nfs-physical-pitr" not found + : + kubectl_bin wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.dbuGOg3xCw ++ mktemp + local LAST_ERR=/tmp/tmp.Q7k4j83KMp + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.dbuGOg3xCw + cat /tmp/tmp.Q7k4j83KMp + rm /tmp/tmp.dbuGOg3xCw /tmp/tmp.Q7k4j83KMp + return 0 + for crd_name in '$(yq eval '\''.metadata.name'\'' 
"${src_dir}/deploy/crd.yaml" | grep -v '\''\-\-\-'\'')' + grep -v NAMESPACE + kubectl get perconaservermongodbrestores.psmdb.percona.com --all-namespaces -o wide + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbrestores.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbrestores" + kubectl patch perconaservermongodbrestores.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbrestores" + : + kubectl_bin wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.fEyqyOEL94 ++ mktemp + local LAST_ERR=/tmp/tmp.hEu9LFX5jd + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.fEyqyOEL94 + cat /tmp/tmp.hEu9LFX5jd + rm /tmp/tmp.fEyqyOEL94 /tmp/tmp.hEu9LFX5jd + return 0 + for crd_name in '$(yq eval '\''.metadata.name'\'' "${src_dir}/deploy/crd.yaml" | grep -v '\''\-\-\-'\'')' + grep -v NAMESPACE + kubectl get perconaservermongodbs.psmdb.percona.com --all-namespaces -o wide + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbs.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' + kubectl patch perconaservermongodbs.psmdb.percona.com -n demand-backup-fs-27269 some-name --type=merge -p '{"metadata":{"finalizers":[]}}' perconaservermongodb.psmdb.percona.com/some-name patched + kubectl_bin wait --for=delete crd perconaservermongodbs.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.vjzDriprvd ++ mktemp + local LAST_ERR=/tmp/tmp.7seLx00rIM + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=delete crd perconaservermongodbs.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.vjzDriprvd customresourcedefinition.apiextensions.k8s.io/perconaservermongodbs.psmdb.percona.com condition met + cat /tmp/tmp.7seLx00rIM + rm /tmp/tmp.vjzDriprvd /tmp/tmp.7seLx00rIM + return 0 + local rbac_yaml=rbac.yaml + '[' -n psmdb-operator ']' + rbac_yaml=cw-rbac.yaml + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1897/deploy/cw-rbac.yaml --ignore-not-found ++ mktemp + local LAST_OUT=/tmp/tmp.RI6VgKx6nL ++ mktemp + local LAST_ERR=/tmp/tmp.msdNimdYLw + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1897/deploy/cw-rbac.yaml --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.RI6VgKx6nL clusterrole.rbac.authorization.k8s.io "percona-server-mongodb-operator" deleted clusterrolebinding.rbac.authorization.k8s.io "service-account-percona-server-mongodb-operator" deleted + cat /tmp/tmp.msdNimdYLw + rm /tmp/tmp.RI6VgKx6nL /tmp/tmp.msdNimdYLw + return 0 + check_crd_for_deletion PR-1897-ea494d1c + local git_tag=PR-1897-ea494d1c ++ curl -s https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/PR-1897-ea494d1c/deploy/crd.yaml ++ /usr/bin/sed ':a;N;$!ba;s/\n/ /g' ++ yq eval .metadata.name ++ /usr/bin/sed s/---//g + for crd_name in '$(curl -s https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/${git_tag}/deploy/crd.yaml | yq eval '\''.metadata.name'\'' | $sed '\''s/---//g'\'' | $sed 
'\'':a;N;$!ba;s/\n/ /g'\'')' ++ kubectl_bin get crd/null -o 'jsonpath={.status.conditions[-1].type}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.PZ1Qg5sajw +++ mktemp ++ local LAST_ERR=/tmp/tmp.KZZrOnfKH0 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}' ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.PZ1Qg5sajw ++ cat /tmp/tmp.KZZrOnfKH0 Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ sleep 0 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}' ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.PZ1Qg5sajw ++ cat /tmp/tmp.KZZrOnfKH0 Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ sleep 4 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}' ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.PZ1Qg5sajw ++ cat /tmp/tmp.KZZrOnfKH0 Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ sleep 8 ++ cat /tmp/tmp.PZ1Qg5sajw ++ cat /tmp/tmp.KZZrOnfKH0 Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ rm /tmp/tmp.PZ1Qg5sajw /tmp/tmp.KZZrOnfKH0 ++ return 1 + [[ '' == \T\e\r\m\i\n\a\t\i\n\g ]] + '[' -n psmdb-operator ']' + create_namespace psmdb-operator + local namespace=psmdb-operator + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + destroy_chaos_mesh ++ sed s/NAMESPACE// ++ helm list --all-namespaces --filter chaos-mesh ++ tail -n1 ++ awk '-F ' '{print $2}' + local chaos_mesh_ns= + desc 'destroy chaos-mesh' + set +o xtrace ----------------------------------------------------------------------------------- destroy chaos-mesh ----------------------------------------------------------------------------------- + '[' -n '' ']' ++ grep chaos-mesh ++ kubectl get MutatingWebhookConfiguration ++ awk '{print $1}' + timeout 30 kubectl delete MutatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ awk '{print $1}' ++ grep chaos-mesh ++ kubectl get ValidatingWebhookConfiguration + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ awk '{print $1}' ++ grep validate-auth ++ kubectl get ValidatingWebhookConfiguration + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl api-resources ++ grep chaos-mesh ++ awk '{print $1}' ++ kubectl get crd ++ grep chaos-mesh.org ++ awk '{print $1}' + timeout 30 kubectl delete crd error: resource(s) were provided, but no name was specified + : ++ awk '{print $1}' ++ kubectl get clusterrolebinding ++ grep chaos-mesh + timeout 30 kubectl delete clusterrolebinding error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrole ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrole error: resource(s) were provided, but no name was specified + : + desc 'cleaned up all old namespaces' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up all old namespaces ----------------------------------------------------------------------------------- + kubectl_bin get ns + awk '{print$1}' + '[' -n '' ']' ++ mktemp + xargs kubectl delete ns + desc 
'cleaned up old namespaces psmdb-operator' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up old namespaces psmdb-operator ----------------------------------------------------------------------------------- + kubectl_bin delete namespace psmdb-operator --ignore-not-found ++ mktemp + local LAST_OUT=/tmp/tmp.iyuIm9F2Cu + egrep -v '^kube-|^default|Terminating|psmdb-operator|openshift|gke-mcs|^NAME' ++ mktemp + local LAST_OUT=/tmp/tmp.7SPAqSBdjK ++ mktemp + local LAST_ERR=/tmp/tmp.nLuaCZHWH0 + local exit_status=0 + local timeout=4 ++ seq 0 2 + local LAST_ERR=/tmp/tmp.ESnF6I18da + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get ns + for i in '$(seq 0 2)' + set +e + kubectl delete namespace psmdb-operator --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.iyuIm9F2Cu + cat /tmp/tmp.nLuaCZHWH0 + rm /tmp/tmp.iyuIm9F2Cu /tmp/tmp.nLuaCZHWH0 + return 0 namespace "demand-backup-fs-27269" deleted namespace "gke-managed-cim" deleted namespace "gke-managed-system" deleted namespace "gmp-public" deleted namespace "gmp-system" deleted namespace "storage" deleted + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.7SPAqSBdjK namespace "psmdb-operator" deleted + cat /tmp/tmp.ESnF6I18da + rm /tmp/tmp.7SPAqSBdjK /tmp/tmp.ESnF6I18da + return 0 + kubectl_bin wait --for=delete namespace psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.t39XC1K9oF ++ mktemp + local LAST_ERR=/tmp/tmp.FJvUK12c0E + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=delete namespace psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.t39XC1K9oF + cat /tmp/tmp.FJvUK12c0E + rm /tmp/tmp.t39XC1K9oF /tmp/tmp.FJvUK12c0E + return 0 + desc 'create namespace psmdb-operator' + set +o xtrace ----------------------------------------------------------------------------------- create namespace psmdb-operator ----------------------------------------------------------------------------------- + kubectl_bin create namespace psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.ez4qlJCmco ++ mktemp + local LAST_ERR=/tmp/tmp.nzFGYtUBmg + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl create namespace psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.ez4qlJCmco namespace/psmdb-operator created + cat /tmp/tmp.nzFGYtUBmg + rm /tmp/tmp.ez4qlJCmco /tmp/tmp.nzFGYtUBmg + return 0 ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.eb3RwCafpL +++ mktemp ++ local LAST_ERR=/tmp/tmp.PkYvGYEBqP ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.eb3RwCafpL ++ cat /tmp/tmp.PkYvGYEBqP ++ rm /tmp/tmp.eb3RwCafpL /tmp/tmp.PkYvGYEBqP ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-1897-ea494d1c-6-cluster4 --namespace=psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.lZAFlpTui2 ++ mktemp + local LAST_ERR=/tmp/tmp.6VjPUFTwf0 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-1897-ea494d1c-6-cluster4 --namespace=psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.lZAFlpTui2 
Context "gke_cloud-dev-112233_us-central1-a_jen-psmdb-1897-ea494d1c-6-cluster4" modified. + cat /tmp/tmp.6VjPUFTwf0 + rm /tmp/tmp.lZAFlpTui2 /tmp/tmp.6VjPUFTwf0 + return 0 + deploy_operator + desc 'start PSMDB operator' + set +o xtrace ----------------------------------------------------------------------------------- start PSMDB operator ----------------------------------------------------------------------------------- + local cr_file + '[' -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1897/e2e-tests/demand-backup-fs/conf/crd.yaml ']' + cr_file=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1897/deploy/crd.yaml + kubectl_bin apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1897/deploy/crd.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.sB9xmzRord ++ mktemp + local LAST_ERR=/tmp/tmp.MTktSfdR1y + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1897/deploy/crd.yaml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.sB9xmzRord customresourcedefinition.apiextensions.k8s.io/perconaservermongodbbackups.psmdb.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaservermongodbrestores.psmdb.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaservermongodbs.psmdb.percona.com serverside-applied + cat /tmp/tmp.MTktSfdR1y + rm /tmp/tmp.sB9xmzRord /tmp/tmp.MTktSfdR1y + return 0 + '[' -n psmdb-operator ']' + apply_rbac cw-rbac + local operator_namespace=psmdb-operator + local rbac=cw-rbac + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1897/deploy/cw-rbac.yaml + sed -e 's^namespace: .*^namespace: psmdb-operator^' + kubectl_bin apply -n psmdb-operator -f - ++ mktemp + local LAST_OUT=/tmp/tmp.iabUiKEuQG ++ mktemp + local LAST_ERR=/tmp/tmp.rgAzlLK4rO + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -n psmdb-operator -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.iabUiKEuQG clusterrole.rbac.authorization.k8s.io/percona-server-mongodb-operator created serviceaccount/percona-server-mongodb-operator created clusterrolebinding.rbac.authorization.k8s.io/service-account-percona-server-mongodb-operator created + cat /tmp/tmp.rgAzlLK4rO + rm /tmp/tmp.iabUiKEuQG /tmp/tmp.rgAzlLK4rO + return 0 + kubectl_bin apply -f - + yq eval ' (.spec.template.spec.containers[].image = "perconalab/percona-server-mongodb-operator:PR-1897-ea494d1c") | ((.. | select(.[] == "DISABLE_TELEMETRY")) |= .value="true") | ((.. 
| select(.[] == "LOG_LEVEL")) |= .value="DEBUG")' /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1897/deploy/cw-operator.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.EYZzEmTK3T ++ mktemp + local LAST_ERR=/tmp/tmp.n6JLnY6Vkv + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.EYZzEmTK3T deployment.apps/percona-server-mongodb-operator created + cat /tmp/tmp.n6JLnY6Vkv + rm /tmp/tmp.EYZzEmTK3T /tmp/tmp.n6JLnY6Vkv + return 0 + sleep 2 ++ get_operator_pod ++ kubectl_bin get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.H1VbHjicdD +++ mktemp ++ local LAST_ERR=/tmp/tmp.n6QtS7jl04 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.H1VbHjicdD ++ cat /tmp/tmp.n6QtS7jl04 ++ rm /tmp/tmp.H1VbHjicdD /tmp/tmp.n6QtS7jl04 ++ return 0 + wait_pod percona-server-mongodb-operator-564698798-sgctn + local pod=percona-server-mongodb-operator-564698798-sgctn + set +o xtrace waiting for pod/percona-server-mongodb-operator-564698798-sgctn to be ready.OK + create_namespace demand-backup-fs-18803 + local namespace=demand-backup-fs-18803 + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + destroy_chaos_mesh ++ awk '-F ' '{print $2}' ++ tail -n1 ++ sed s/NAMESPACE// ++ helm list --all-namespaces --filter chaos-mesh + local chaos_mesh_ns= + desc 'destroy chaos-mesh' + set +o xtrace ----------------------------------------------------------------------------------- destroy chaos-mesh ----------------------------------------------------------------------------------- + '[' -n '' ']' ++ kubectl get MutatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete MutatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ awk '{print $1}' ++ kubectl get ValidatingWebhookConfiguration ++ grep chaos-mesh + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ awk '{print $1}' ++ grep validate-auth + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl api-resources ++ grep chaos-mesh ++ awk '{print $1}' ++ kubectl get crd ++ grep chaos-mesh.org ++ awk '{print $1}' + timeout 30 kubectl delete crd error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrolebinding ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrolebinding error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrole ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrole error: resource(s) were provided, but no name was specified + : + desc 'cleaned up all old namespaces' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up all old namespaces ----------------------------------------------------------------------------------- + kubectl_bin get ns + egrep -v '^kube-|^default|Terminating|psmdb-operator|openshift|gke-mcs|^NAME' + '[' -n '' ']' + awk '{print$1}' + desc 'cleaned 
up old namespaces demand-backup-fs-18803' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up old namespaces demand-backup-fs-18803 ----------------------------------------------------------------------------------- + kubectl_bin delete namespace demand-backup-fs-18803 --ignore-not-found ++ mktemp + xargs kubectl delete ns + local LAST_OUT=/tmp/tmp.XCVWxtSbwX ++ mktemp ++ mktemp + local LAST_ERR=/tmp/tmp.qYmQszQtpP + local exit_status=0 + local timeout=4 + local LAST_OUT=/tmp/tmp.1MNZczil08 ++ seq 0 2 ++ mktemp + for i in '$(seq 0 2)' + set +e + kubectl delete namespace demand-backup-fs-18803 --ignore-not-found + local LAST_ERR=/tmp/tmp.ZfSPK4ORWL + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get ns + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.1MNZczil08 + cat /tmp/tmp.ZfSPK4ORWL + rm /tmp/tmp.1MNZczil08 /tmp/tmp.ZfSPK4ORWL + return 0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.XCVWxtSbwX + cat /tmp/tmp.qYmQszQtpP + rm /tmp/tmp.XCVWxtSbwX /tmp/tmp.qYmQszQtpP + return 0 + kubectl_bin wait --for=delete namespace demand-backup-fs-18803 ++ mktemp + local LAST_OUT=/tmp/tmp.ubt9Qf7g0B ++ mktemp + local LAST_ERR=/tmp/tmp.9m8vfKk33L + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=delete namespace demand-backup-fs-18803 namespace "gke-managed-cim" deleted namespace "gke-managed-system" deleted namespace "gmp-public" deleted namespace "gmp-system" deleted + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.ubt9Qf7g0B + cat /tmp/tmp.9m8vfKk33L + rm /tmp/tmp.ubt9Qf7g0B /tmp/tmp.9m8vfKk33L + return 0 + desc 'create namespace demand-backup-fs-18803' + set +o xtrace ----------------------------------------------------------------------------------- create namespace demand-backup-fs-18803 ----------------------------------------------------------------------------------- + kubectl_bin create namespace demand-backup-fs-18803 ++ mktemp + local LAST_OUT=/tmp/tmp.2kyRFWY22h ++ mktemp + local LAST_ERR=/tmp/tmp.OJ8E2MeWtL + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl create namespace demand-backup-fs-18803 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.2kyRFWY22h namespace/demand-backup-fs-18803 created + cat /tmp/tmp.OJ8E2MeWtL + rm /tmp/tmp.2kyRFWY22h /tmp/tmp.OJ8E2MeWtL + return 0 ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.PiN81PIUGo +++ mktemp ++ local LAST_ERR=/tmp/tmp.6iFDHDRHbC ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.PiN81PIUGo ++ cat /tmp/tmp.6iFDHDRHbC ++ rm /tmp/tmp.PiN81PIUGo /tmp/tmp.6iFDHDRHbC ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-1897-ea494d1c-6-cluster4 --namespace=demand-backup-fs-18803 ++ mktemp + local LAST_OUT=/tmp/tmp.TIdZ4etgnh ++ mktemp + local LAST_ERR=/tmp/tmp.nrv0Htlnvl + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-1897-ea494d1c-6-cluster4 --namespace=demand-backup-fs-18803 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.TIdZ4etgnh Context 
"gke_cloud-dev-112233_us-central1-a_jen-psmdb-1897-ea494d1c-6-cluster4" modified. + cat /tmp/tmp.nrv0Htlnvl + rm /tmp/tmp.TIdZ4etgnh /tmp/tmp.nrv0Htlnvl + return 0 + kubectl_bin delete ns storage ++ mktemp + local LAST_OUT=/tmp/tmp.mEvsNYkmJu ++ mktemp + local LAST_ERR=/tmp/tmp.pPNi5XDOwm + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete ns storage + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.mEvsNYkmJu + cat /tmp/tmp.pPNi5XDOwm Error from server (NotFound): namespaces "storage" not found + sleep 0 + for i in '$(seq 0 2)' + set +e + kubectl delete ns storage + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.mEvsNYkmJu + cat /tmp/tmp.pPNi5XDOwm Error from server (NotFound): namespaces "storage" not found + sleep 4 + for i in '$(seq 0 2)' + set +e + kubectl delete ns storage + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.mEvsNYkmJu + cat /tmp/tmp.pPNi5XDOwm Error from server (NotFound): namespaces "storage" not found + sleep 8 + cat /tmp/tmp.mEvsNYkmJu + cat /tmp/tmp.pPNi5XDOwm Error from server (NotFound): namespaces "storage" not found + rm /tmp/tmp.mEvsNYkmJu /tmp/tmp.pPNi5XDOwm + return 1 + : + [[ 1 != 1 ]] + log 'deploying NFS server' + set +o xtrace [2025-05-19T14:40:56+0000] deploying NFS server + deploy_nfs_server + kubectl_bin create namespace storage ++ mktemp + local LAST_OUT=/tmp/tmp.ovJj2w1whn ++ mktemp + local LAST_ERR=/tmp/tmp.eMcdMwyFN5 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl create namespace storage + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.ovJj2w1whn namespace/storage created + cat /tmp/tmp.eMcdMwyFN5 + rm /tmp/tmp.ovJj2w1whn /tmp/tmp.eMcdMwyFN5 + return 0 + kubectl_bin apply -n storage -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1897/e2e-tests/demand-backup-fs/conf/nfs-server.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.XCg8U1Bvzj ++ mktemp + local LAST_ERR=/tmp/tmp.NhgGLM9d6a + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -n storage -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1897/e2e-tests/demand-backup-fs/conf/nfs-server.yaml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.XCg8U1Bvzj persistentvolumeclaim/nfs-pvc created deployment.apps/nfs-server created service/nfs-service created + cat /tmp/tmp.NhgGLM9d6a + rm /tmp/tmp.XCg8U1Bvzj /tmp/tmp.NhgGLM9d6a + return 0 + sleep 5 ++ kubectl_bin get pod -n storage -l app=nfs-server -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.HW5rCX7NBM +++ mktemp ++ local LAST_ERR=/tmp/tmp.GvdyFrRbx7 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pod -n storage -l app=nfs-server -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.HW5rCX7NBM ++ cat /tmp/tmp.GvdyFrRbx7 ++ rm /tmp/tmp.HW5rCX7NBM /tmp/tmp.GvdyFrRbx7 ++ return 0 + local nfsPod=nfs-server-d6d48c54d-jckch ++ kubectl_bin get pod nfs-server-d6d48c54d-jckch -n storage -o 'jsonpath={.status.phase}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.NjMfFCXG5S +++ mktemp ++ local LAST_ERR=/tmp/tmp.tPxwHdqK3y ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pod nfs-server-d6d48c54d-jckch -n storage -o 'jsonpath={.status.phase}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat 
/tmp/tmp.NjMfFCXG5S ++ cat /tmp/tmp.tPxwHdqK3y ++ rm /tmp/tmp.NjMfFCXG5S /tmp/tmp.tPxwHdqK3y ++ return 0 + [[ Pending == \R\u\n\n\i\n\g ]] + log 'Waiting for nfs-server-d6d48c54d-jckch to start Running' + set +o xtrace [2025-05-19T14:41:08+0000] Waiting for nfs-server-d6d48c54d-jckch to start Running + sleep 1 ++ kubectl_bin get pod nfs-server-d6d48c54d-jckch -n storage -o 'jsonpath={.status.phase}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.hIlyQOQmj3 +++ mktemp ++ local LAST_ERR=/tmp/tmp.Ajm8rqArL4 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pod nfs-server-d6d48c54d-jckch -n storage -o 'jsonpath={.status.phase}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.hIlyQOQmj3 ++ cat /tmp/tmp.Ajm8rqArL4 ++ rm /tmp/tmp.hIlyQOQmj3 /tmp/tmp.Ajm8rqArL4 ++ return 0 + [[ Pending == \R\u\n\n\i\n\g ]] + log 'Waiting for nfs-server-d6d48c54d-jckch to start Running' + set +o xtrace [2025-05-19T14:41:11+0000] Waiting for nfs-server-d6d48c54d-jckch to start Running + sleep 1 ++ kubectl_bin get pod nfs-server-d6d48c54d-jckch -n storage -o 'jsonpath={.status.phase}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.cvuTyy1weN +++ mktemp ++ local LAST_ERR=/tmp/tmp.7UkT8V7ZZI ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pod nfs-server-d6d48c54d-jckch -n storage -o 'jsonpath={.status.phase}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.cvuTyy1weN ++ cat /tmp/tmp.7UkT8V7ZZI ++ rm /tmp/tmp.cvuTyy1weN /tmp/tmp.7UkT8V7ZZI ++ return 0 + [[ Pending == \R\u\n\n\i\n\g ]] + log 'Waiting for nfs-server-d6d48c54d-jckch to start Running' + set +o xtrace [2025-05-19T14:41:13+0000] Waiting for nfs-server-d6d48c54d-jckch to start Running + sleep 1 ++ kubectl_bin get pod nfs-server-d6d48c54d-jckch -n storage -o 'jsonpath={.status.phase}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.2WW5gOhWlE +++ mktemp ++ local LAST_ERR=/tmp/tmp.Cb6Ix7KuYr ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pod nfs-server-d6d48c54d-jckch -n storage -o 'jsonpath={.status.phase}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.2WW5gOhWlE ++ cat /tmp/tmp.Cb6Ix7KuYr ++ rm /tmp/tmp.2WW5gOhWlE /tmp/tmp.Cb6Ix7KuYr ++ return 0 + [[ Pending == \R\u\n\n\i\n\g ]] + log 'Waiting for nfs-server-d6d48c54d-jckch to start Running' + set +o xtrace [2025-05-19T14:41:16+0000] Waiting for nfs-server-d6d48c54d-jckch to start Running + sleep 1 ++ kubectl_bin get pod nfs-server-d6d48c54d-jckch -n storage -o 'jsonpath={.status.phase}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.9vd03gEiLd +++ mktemp ++ local LAST_ERR=/tmp/tmp.tKvJ5m3mBu ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pod nfs-server-d6d48c54d-jckch -n storage -o 'jsonpath={.status.phase}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.9vd03gEiLd ++ cat /tmp/tmp.tKvJ5m3mBu ++ rm /tmp/tmp.9vd03gEiLd /tmp/tmp.tKvJ5m3mBu ++ return 0 + [[ Pending == \R\u\n\n\i\n\g ]] + log 'Waiting for nfs-server-d6d48c54d-jckch to start Running' + set +o xtrace [2025-05-19T14:41:18+0000] Waiting for nfs-server-d6d48c54d-jckch to start Running + sleep 1 ++ kubectl_bin get pod nfs-server-d6d48c54d-jckch -n storage -o 'jsonpath={.status.phase}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.DeeJISzuX1 +++ mktemp ++ local LAST_ERR=/tmp/tmp.clgNNXlS3D ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for 
i in '$(seq 0 2)' ++ set +e ++ kubectl get pod nfs-server-d6d48c54d-jckch -n storage -o 'jsonpath={.status.phase}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.DeeJISzuX1 ++ cat /tmp/tmp.clgNNXlS3D ++ rm /tmp/tmp.DeeJISzuX1 /tmp/tmp.clgNNXlS3D ++ return 0 + [[ Pending == \R\u\n\n\i\n\g ]] + log 'Waiting for nfs-server-d6d48c54d-jckch to start Running' + set +o xtrace [2025-05-19T14:41:21+0000] Waiting for nfs-server-d6d48c54d-jckch to start Running + sleep 1 ++ kubectl_bin get pod nfs-server-d6d48c54d-jckch -n storage -o 'jsonpath={.status.phase}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.7VTHQ5TALy +++ mktemp ++ local LAST_ERR=/tmp/tmp.sPIqVL7Eac ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pod nfs-server-d6d48c54d-jckch -n storage -o 'jsonpath={.status.phase}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.7VTHQ5TALy ++ cat /tmp/tmp.sPIqVL7Eac ++ rm /tmp/tmp.7VTHQ5TALy /tmp/tmp.sPIqVL7Eac ++ return 0 + [[ Running == \R\u\n\n\i\n\g ]] + kubectl_bin exec -n storage nfs-server-d6d48c54d-jckch -- mkdir /exports/psmdb-some-name-rs0 ++ mktemp + local LAST_OUT=/tmp/tmp.nVLW0sSUBA ++ mktemp + local LAST_ERR=/tmp/tmp.1AtF3LY0FA + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec -n storage nfs-server-d6d48c54d-jckch -- mkdir /exports/psmdb-some-name-rs0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.nVLW0sSUBA + cat /tmp/tmp.1AtF3LY0FA + rm /tmp/tmp.nVLW0sSUBA /tmp/tmp.1AtF3LY0FA + return 0 + kubectl_bin exec -n storage nfs-server-d6d48c54d-jckch -- chown 1001:1001 /exports/psmdb-some-name-rs0 ++ mktemp + local LAST_OUT=/tmp/tmp.mL3bQlpAd3 ++ mktemp + local LAST_ERR=/tmp/tmp.Vp3qYKL5ik + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec -n storage nfs-server-d6d48c54d-jckch -- chown 1001:1001 /exports/psmdb-some-name-rs0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.mL3bQlpAd3 + cat /tmp/tmp.Vp3qYKL5ik + rm /tmp/tmp.mL3bQlpAd3 /tmp/tmp.Vp3qYKL5ik + return 0 + log 'creating secrets and start client' + set +o xtrace [2025-05-19T14:41:28+0000] creating secrets and start client + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1897/e2e-tests/conf/secrets.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1897/e2e-tests/conf/client.yml ++ mktemp + local LAST_OUT=/tmp/tmp.Y7u4LcjzjS ++ mktemp + local LAST_ERR=/tmp/tmp.BLrObQVTGi + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1897/e2e-tests/conf/secrets.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1897/e2e-tests/conf/client.yml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Y7u4LcjzjS secret/some-users created deployment.apps/psmdb-client created + cat /tmp/tmp.BLrObQVTGi + rm /tmp/tmp.Y7u4LcjzjS /tmp/tmp.BLrObQVTGi + return 0 + log 'creating PSMDB cluster some-name' + set +o xtrace [2025-05-19T14:41:31+0000] creating PSMDB cluster some-name + [[ 1 != 1 ]] + apply_cluster /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1897/e2e-tests/demand-backup-fs/conf/some-name.yaml + '[' -z '' ']' + cat_config /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1897/e2e-tests/demand-backup-fs/conf/some-name.yaml + kubectl_bin apply -f - + yq eval '(.spec | select(.image == null)).image = 
"perconalab/percona-server-mongodb-operator:main-mongod8.0"' ++ mktemp + local LAST_OUT=/tmp/tmp.fldxwfytwf + yq eval '.spec.upgradeOptions.apply="Never"' + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1897/e2e-tests/demand-backup-fs/conf/some-name.yaml ++ mktemp + yq eval '(.spec | select(has("backup"))).backup.image = "perconalab/percona-server-mongodb-operator:main-backup"' + local LAST_ERR=/tmp/tmp.Esrbabn2SQ + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + yq eval '(.spec | select(has("pmm"))).pmm.image = "perconalab/pmm-client:dev-latest"' + yq eval '(.spec | select(has("initImage"))).initImage = "perconalab/percona-server-mongodb-operator:PR-1897-ea494d1c"' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.fldxwfytwf perconaservermongodb.psmdb.percona.com/some-name created + cat /tmp/tmp.Esrbabn2SQ + rm /tmp/tmp.fldxwfytwf /tmp/tmp.Esrbabn2SQ + return 0 + log 'wait for all 3 pods to start' + set +o xtrace [2025-05-19T14:41:34+0000] wait for all 3 pods to start + wait_for_running some-name-rs0 3 + local name=some-name-rs0 + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=some-name ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + [[ 0 -eq 2 ]] + wait_pod some-name-rs0-0 + local pod=some-name-rs0-0 + set +o xtrace waiting for pod/some-name-rs0-0 to be ready.......OK + for i in '$(seq 0 $last_pod)' + [[ 1 -eq 2 ]] + wait_pod some-name-rs0-1 + local pod=some-name-rs0-1 + set +o xtrace waiting for pod/some-name-rs0-1 to be ready....OK + for i in '$(seq 0 $last_pod)' + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.JNmb1w7BfN +++ mktemp ++ local LAST_ERR=/tmp/tmp.R0suYPVaUZ ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.JNmb1w7BfN ++ cat /tmp/tmp.R0suYPVaUZ ++ rm /tmp/tmp.JNmb1w7BfN /tmp/tmp.R0suYPVaUZ ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-rs0-2 + local pod=some-name-rs0-2 + set +o xtrace waiting for pod/some-name-rs0-2 to be ready.......OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.26filJJRXP +++ mktemp ++ local LAST_ERR=/tmp/tmp.3zRJX5Yma1 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.26filJJRXP ++ cat /tmp/tmp.3zRJX5Yma1 ++ rm /tmp/tmp.26filJJRXP /tmp/tmp.3zRJX5Yma1 ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness... 
+ [[ 1 == 1 ]] + log 'checking if statefulset created with expected config' + set +o xtrace [2025-05-19T14:43:06+0000] checking if statefulset created with expected config + compare_kubectl statefulset/some-name-rs0 + local resource=statefulset/some-name-rs0 + local postfix= + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1897/e2e-tests/demand-backup-fs/compare/statefulset_some-name-rs0.yml + local new_result=/tmp/tmp.NzbDqpY8QV/statefulset_some-name-rs0.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1897/e2e-tests/demand-backup-fs/compare/statefulset_some-name-rs0-oc.yml ']' + kubectl_bin get -o yaml statefulset/some-name-rs0 ++ mktemp + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("demand-backup-fs-18803", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. 
== "batch/v1beta1")) = "batch/v1" ' - + local LAST_OUT=/tmp/tmp.NNVMQLwKjl ++ mktemp + local LAST_ERR=/tmp/tmp.RRGq7g3Hup + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get -o yaml statefulset/some-name-rs0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.NNVMQLwKjl + cat /tmp/tmp.RRGq7g3Hup + rm /tmp/tmp.NNVMQLwKjl /tmp/tmp.RRGq7g3Hup + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.NzbDqpY8QV/statefulset_some-name-rs0.yml + version_gt 1.22 ++ bc -l ++ echo '1.30 >= 1.22' + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.NzbDqpY8QV/statefulset_some-name-rs0.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.NzbDqpY8QV/statefulset_some-name-rs0.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1897/e2e-tests/demand-backup-fs/compare/statefulset_some-name-rs0.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1897/e2e-tests/demand-backup-fs/compare/statefulset_some-name-rs0.yml /tmp/tmp.NzbDqpY8QV/statefulset_some-name-rs0.yml + log 'creating user' + set +o xtrace [2025-05-19T14:43:07+0000] creating user + run_mongo 'db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})' userAdmin:userAdmin123456@some-name-rs0.demand-backup-fs-18803 + local 'command=db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})' + local uri=userAdmin:userAdmin123456@some-name-rs0.demand-backup-fs-18803 + local driver=mongodb+srv + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.CiSuqWcWZ1 +++ mktemp ++ local LAST_ERR=/tmp/tmp.Le6gn1na5Y ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.CiSuqWcWZ1 ++ cat /tmp/tmp.Le6gn1na5Y ++ rm /tmp/tmp.CiSuqWcWZ1 /tmp/tmp.Le6gn1na5Y ++ return 0 + local client_container=psmdb-client-66f577db5f-678sd + local mongo_flag= + [[ userAdmin:userAdmin123456@some-name-rs0.demand-backup-fs-18803 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-66f577db5f-678sd -- bash -c 'printf '\''db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})\n'\'' | mongo mongodb+srv://userAdmin:userAdmin123456@some-name-rs0.demand-backup-fs-18803.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.Tp3iJ0QLMk ++ mktemp + local LAST_ERR=/tmp/tmp.39f8pbXGWz + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-66f577db5f-678sd -- bash -c 'printf '\''db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})\n'\'' | mongo mongodb+srv://userAdmin:userAdmin123456@some-name-rs0.demand-backup-fs-18803.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Tp3iJ0QLMk Percona Server for MongoDB shell version v4.4.29-28 connecting to: 
mongodb://some-name-rs0-2.some-name-rs0.demand-backup-fs-18803.svc.cluster.local:27017,some-name-rs0-0.some-name-rs0.demand-backup-fs-18803.svc.cluster.local:27017,some-name-rs0-1.some-name-rs0.demand-backup-fs-18803.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("ecb94a07-dfde-4b15-9fab-62e12780a92d") } Percona Server for MongoDB server version: v8.0.8-3 WARNING: shell and server versions do not match Successfully added user: { "user" : "myApp", "roles" : [ { "db" : "myApp", "role" : "readWrite" } ] } bye + cat /tmp/tmp.39f8pbXGWz + rm /tmp/tmp.Tp3iJ0QLMk /tmp/tmp.39f8pbXGWz + return 0 + sleep 2 + log 'write initial data' + set +o xtrace [2025-05-19T14:43:13+0000] write initial data + write_data 100500 '' + local x=100500 + local find_prefix= + run_mongo 'use myApp\n db.test.insert({ x: 100500 })' myApp:myPass@some-name-rs0.demand-backup-fs-18803 + local 'command=use myApp\n db.test.insert({ x: 100500 })' + local uri=myApp:myPass@some-name-rs0.demand-backup-fs-18803 + local driver=mongodb+srv + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.BS9qYrd8f7 +++ mktemp ++ local LAST_ERR=/tmp/tmp.WjRzFRKwbb ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.BS9qYrd8f7 ++ cat /tmp/tmp.WjRzFRKwbb ++ rm /tmp/tmp.BS9qYrd8f7 /tmp/tmp.WjRzFRKwbb ++ return 0 + local client_container=psmdb-client-66f577db5f-678sd + local mongo_flag= + [[ myApp:myPass@some-name-rs0.demand-backup-fs-18803 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-66f577db5f-678sd -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.demand-backup-fs-18803.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.KK1MginvoR ++ mktemp + local LAST_ERR=/tmp/tmp.1KAeHFFlxC + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-66f577db5f-678sd -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.demand-backup-fs-18803.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.KK1MginvoR Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-rs0-1.some-name-rs0.demand-backup-fs-18803.svc.cluster.local:27017,some-name-rs0-2.some-name-rs0.demand-backup-fs-18803.svc.cluster.local:27017,some-name-rs0-0.some-name-rs0.demand-backup-fs-18803.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("2592cf0d-0031-4b15-a7be-7d6d8e111d7c") } Percona Server for MongoDB server version: v8.0.8-3 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.1KAeHFFlxC + rm /tmp/tmp.KK1MginvoR /tmp/tmp.1KAeHFFlxC + return 0 + compare_mongo_cmd find myApp:myPass@some-name-rs0.demand-backup-fs-18803 '' .svc.cluster.local myApp test + local command=find + local uri=myApp:myPass@some-name-rs0.demand-backup-fs-18803 + local postfix= + local suffix=.svc.cluster.local + local 
database=myApp + local collection=test + local sort= + local 'full_command=db.test.find()' + [[ ! -z '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-05-19T14:43:16+0000] running db.test.find() in myApp + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0.demand-backup-fs-18803 mongodb .svc.cluster.local + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0.demand-backup-fs-18803 + local driver=mongodb + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ local LAST_OUT=/tmp/tmp.NmBYj3KTXu +++ mktemp ++ local LAST_ERR=/tmp/tmp.a2Xh7vl2pJ ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.NmBYj3KTXu ++ cat /tmp/tmp.a2Xh7vl2pJ ++ rm /tmp/tmp.NmBYj3KTXu /tmp/tmp.a2Xh7vl2pJ ++ return 0 + local client_container=psmdb-client-66f577db5f-678sd + local mongo_flag= + [[ myApp:myPass@some-name-rs0.demand-backup-fs-18803 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-66f577db5f-678sd -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0.demand-backup-fs-18803.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.UzaSDVqSYm ++ mktemp + local LAST_ERR=/tmp/tmp.P4QAzgreEk + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-66f577db5f-678sd -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0.demand-backup-fs-18803.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.UzaSDVqSYm + cat /tmp/tmp.P4QAzgreEk + rm /tmp/tmp.UzaSDVqSYm /tmp/tmp.P4QAzgreEk + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1897/e2e-tests/demand-backup-fs/compare/find.json /tmp/tmp.NzbDqpY8QV/find + wait_backup_agent some-name-rs0-0 + local agent_pod=some-name-rs0-0 + set +o xtrace waiting for pbm-agent to be ready in some-name-rs0-0...2025-05-19T14:43:00.000+0000 I listening for the commands + wait_backup_agent some-name-rs0-1 + local agent_pod=some-name-rs0-1 + set +o xtrace waiting for pbm-agent to be ready in some-name-rs0-1...2025-05-19T14:43:05.000+0000 I listening for the commands + wait_backup_agent some-name-rs0-2 + local agent_pod=some-name-rs0-2 + set +o xtrace waiting for pbm-agent to be ready in some-name-rs0-2...2025-05-19T14:43:06.000+0000 I listening for the commands + desc 'CASE 1: Logical backup and restore' + set +o xtrace ----------------------------------------------------------------------------------- CASE 1: Logical backup and restore ----------------------------------------------------------------------------------- + backup_name=backup-nfs-logical + run_backup nfs backup-nfs-logical logical + local storage=nfs + local backup_name=backup-nfs-logical + local type=logical + desc 'run backup backup-nfs-logical' + set +o xtrace 
----------------------------------------------------------------------------------- run backup backup-nfs-logical ----------------------------------------------------------------------------------- + kubectl_bin apply -f - + yq eval '.metadata.name = "backup-nfs-logical" | .spec.storageName = "nfs" | .spec.type = "logical"' /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1897/e2e-tests/demand-backup-fs/conf/backup-nfs.yml ++ mktemp + local LAST_OUT=/tmp/tmp.gZOKkLxguq ++ mktemp + local LAST_ERR=/tmp/tmp.yzVpaB0zOk + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.gZOKkLxguq perconaservermongodbbackup.psmdb.percona.com/backup-nfs-logical created + cat /tmp/tmp.yzVpaB0zOk + rm /tmp/tmp.gZOKkLxguq /tmp/tmp.yzVpaB0zOk + return 0 + wait_backup backup-nfs-logical + local backup_name=backup-nfs-logical + local target_state=ready + set +o xtrace waiting for backup-nfs-logical to reach ready state..... + run_recovery_check backup-nfs-logical some-name -2nd '' + local backup=backup-nfs-logical + local cluster=some-name + local find_prefix_before=-2nd + local find_prefix_after= + write_data 100501 -2nd + local x=100501 + local find_prefix=-2nd + run_mongo 'use myApp\n db.test.insert({ x: 100501 })' myApp:myPass@some-name-rs0.demand-backup-fs-18803 + local 'command=use myApp\n db.test.insert({ x: 100501 })' + local uri=myApp:myPass@some-name-rs0.demand-backup-fs-18803 + local driver=mongodb+srv + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.h7PX4XwWEu +++ mktemp ++ local LAST_ERR=/tmp/tmp.jaoLaehquA ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.h7PX4XwWEu ++ cat /tmp/tmp.jaoLaehquA ++ rm /tmp/tmp.h7PX4XwWEu /tmp/tmp.jaoLaehquA ++ return 0 + local client_container=psmdb-client-66f577db5f-678sd + local mongo_flag= + [[ myApp:myPass@some-name-rs0.demand-backup-fs-18803 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-66f577db5f-678sd -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100501 })\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.demand-backup-fs-18803.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.PzW6tcvM3P ++ mktemp + local LAST_ERR=/tmp/tmp.5CxLDkCuG7 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-66f577db5f-678sd -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100501 })\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.demand-backup-fs-18803.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.PzW6tcvM3P Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-rs0-2.some-name-rs0.demand-backup-fs-18803.svc.cluster.local:27017,some-name-rs0-0.some-name-rs0.demand-backup-fs-18803.svc.cluster.local:27017,some-name-rs0-1.some-name-rs0.demand-backup-fs-18803.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("09da4c88-938d-4b78-8915-e0f40b98c8e9") } Percona Server for MongoDB server version: v8.0.8-3 WARNING: shell 
and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.5CxLDkCuG7 + rm /tmp/tmp.PzW6tcvM3P /tmp/tmp.5CxLDkCuG7 + return 0 + compare_mongo_cmd find myApp:myPass@some-name-rs0.demand-backup-fs-18803 -2nd .svc.cluster.local myApp test + local command=find + local uri=myApp:myPass@some-name-rs0.demand-backup-fs-18803 + local postfix=-2nd + local suffix=.svc.cluster.local + local database=myApp + local collection=test + local sort= + local 'full_command=db.test.find()' + [[ ! -z '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-05-19T14:43:44+0000] running db.test.find() in myApp + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0.demand-backup-fs-18803 mongodb .svc.cluster.local + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0.demand-backup-fs-18803 + local driver=mongodb + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' +++ mktemp ++ local LAST_OUT=/tmp/tmp.1tsjk2dpKa +++ mktemp ++ local LAST_ERR=/tmp/tmp.XLw5LABqpg ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.1tsjk2dpKa ++ cat /tmp/tmp.XLw5LABqpg ++ rm /tmp/tmp.1tsjk2dpKa /tmp/tmp.XLw5LABqpg ++ return 0 + local client_container=psmdb-client-66f577db5f-678sd + local mongo_flag= + [[ myApp:myPass@some-name-rs0.demand-backup-fs-18803 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-66f577db5f-678sd -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0.demand-backup-fs-18803.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.O9boYDAm1c ++ mktemp + local LAST_ERR=/tmp/tmp.BRKfkQ6cbB + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-66f577db5f-678sd -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0.demand-backup-fs-18803.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.O9boYDAm1c + cat /tmp/tmp.BRKfkQ6cbB + rm /tmp/tmp.O9boYDAm1c /tmp/tmp.BRKfkQ6cbB + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1897/e2e-tests/demand-backup-fs/compare/find-2nd.json /tmp/tmp.NzbDqpY8QV/find-2nd + run_restore backup-nfs-logical + local backup_name=backup-nfs-logical + /usr/bin/sed -e 's/name:/name: restore-backup-nfs-logical/' + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1897/e2e-tests/demand-backup-fs/conf/restore.yml + /usr/bin/sed -e 's/backupName:/backupName: backup-nfs-logical/' + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.3yk61lkLCQ ++ mktemp + local LAST_ERR=/tmp/tmp.gNV3EI9ioU + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.3yk61lkLCQ perconaservermongodbrestore.psmdb.percona.com/restore-backup-nfs-logical 
created + cat /tmp/tmp.gNV3EI9ioU + rm /tmp/tmp.3yk61lkLCQ /tmp/tmp.gNV3EI9ioU + return 0 + wait_restore backup-nfs-logical some-name + local backup_name=backup-nfs-logical + local cluster_name=some-name + local target_state=ready + local wait_cluster_consistency=1 + local wait_time=1780 + local ok_if_ready=0 + set +o xtrace waiting psmdb-restore/restore-backup-nfs-logical to reach ready state.....OK + '[' 1 -eq 1 ']' + wait_cluster_consistency some-name + local cluster_name=some-name + local wait_time=32 + retry=0 + sleep 7 + echo -n 'waiting for cluster readyness' waiting for cluster readyness++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.CnV0ZgeR5V +++ mktemp ++ local LAST_ERR=/tmp/tmp.Nv2JsIha2i ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.CnV0ZgeR5V ++ cat /tmp/tmp.Nv2JsIha2i ++ rm /tmp/tmp.CnV0ZgeR5V /tmp/tmp.Nv2JsIha2i ++ return 0 + [[ ready == \r\e\a\d\y ]] + echo + compare_mongo_cmd find myApp:myPass@some-name-rs0.demand-backup-fs-18803 '' .svc.cluster.local myApp test + local command=find + local uri=myApp:myPass@some-name-rs0.demand-backup-fs-18803 + local postfix= + local suffix=.svc.cluster.local + local database=myApp + local collection=test + local sort= + local 'full_command=db.test.find()' + [[ ! -z '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-05-19T14:44:12+0000] running db.test.find() in myApp + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0.demand-backup-fs-18803 mongodb .svc.cluster.local + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0.demand-backup-fs-18803 + local driver=mongodb + local suffix=.svc.cluster.local + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.DRHrgnWXq0 +++ mktemp ++ local LAST_ERR=/tmp/tmp.X9sgYX0PP9 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.DRHrgnWXq0 ++ cat /tmp/tmp.X9sgYX0PP9 ++ rm /tmp/tmp.DRHrgnWXq0 /tmp/tmp.X9sgYX0PP9 ++ return 0 + local client_container=psmdb-client-66f577db5f-678sd + local mongo_flag= + [[ myApp:myPass@some-name-rs0.demand-backup-fs-18803 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-66f577db5f-678sd -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0.demand-backup-fs-18803.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.u406iJBGnM ++ mktemp + local LAST_ERR=/tmp/tmp.OiT6OpOVWS + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-66f577db5f-678sd -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0.demand-backup-fs-18803.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + 
cat /tmp/tmp.u406iJBGnM + cat /tmp/tmp.OiT6OpOVWS + rm /tmp/tmp.u406iJBGnM /tmp/tmp.OiT6OpOVWS + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1897/e2e-tests/demand-backup-fs/compare/find.json /tmp/tmp.NzbDqpY8QV/find + desc 'CASE 2: Logical backup and PiTR' + set +o xtrace ----------------------------------------------------------------------------------- CASE 2: Logical backup and PiTR ----------------------------------------------------------------------------------- + backup_name=backup-nfs-logical-pitr + run_backup nfs backup-nfs-logical-pitr logical + local storage=nfs + local backup_name=backup-nfs-logical-pitr + local type=logical + desc 'run backup backup-nfs-logical-pitr' + set +o xtrace ----------------------------------------------------------------------------------- run backup backup-nfs-logical-pitr ----------------------------------------------------------------------------------- + kubectl_bin apply -f - + yq eval '.metadata.name = "backup-nfs-logical-pitr" | .spec.storageName = "nfs" | .spec.type = "logical"' /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1897/e2e-tests/demand-backup-fs/conf/backup-nfs.yml ++ mktemp + local LAST_OUT=/tmp/tmp.jQngidReQj ++ mktemp + local LAST_ERR=/tmp/tmp.c9ijFmeWvq + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.jQngidReQj perconaservermongodbbackup.psmdb.percona.com/backup-nfs-logical-pitr created + cat /tmp/tmp.c9ijFmeWvq + rm /tmp/tmp.jQngidReQj /tmp/tmp.c9ijFmeWvq + return 0 + wait_backup backup-nfs-logical-pitr + local backup_name=backup-nfs-logical-pitr + local target_state=ready + set +o xtrace waiting for backup-nfs-logical-pitr to reach ready state.... 
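Each backup object in this test is rendered from the same template and applied in one pipeline, as the yq/kubectl trace above shows. A minimal sketch of that helper, reconstructed from the trace: the template file and the three fields set by yq come straight from the log, while the function shape, the ${test_dir} variable, and whether the wait step lives inside the helper are assumptions.

run_backup() {
    local storage=$1 backup_name=$2 type=$3
    # Render the PerconaServerMongoDBBackup manifest from the shared template,
    # overriding only the object name, the storage name, and the backup type,
    # then apply it to the cluster.
    yq eval ".metadata.name = \"${backup_name}\" | .spec.storageName = \"${storage}\" | .spec.type = \"${type}\"" \
        "${test_dir}/conf/backup-nfs.yml" \
        | kubectl apply -f -
    # The trace then waits for the backup object to reach the ready state.
    wait_backup "${backup_name}"
}

As called in this run, for example: run_backup nfs backup-nfs-logical-pitr logical.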
+ write_data 100502 -3rd + local x=100502 + local find_prefix=-3rd + run_mongo 'use myApp\n db.test.insert({ x: 100502 })' myApp:myPass@some-name-rs0.demand-backup-fs-18803 + local 'command=use myApp\n db.test.insert({ x: 100502 })' + local uri=myApp:myPass@some-name-rs0.demand-backup-fs-18803 + local driver=mongodb+srv + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.vrXQAoHFec +++ mktemp ++ local LAST_ERR=/tmp/tmp.eiS6XzwV8q ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.vrXQAoHFec ++ cat /tmp/tmp.eiS6XzwV8q ++ rm /tmp/tmp.vrXQAoHFec /tmp/tmp.eiS6XzwV8q ++ return 0 + local client_container=psmdb-client-66f577db5f-678sd + local mongo_flag= + [[ myApp:myPass@some-name-rs0.demand-backup-fs-18803 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-66f577db5f-678sd -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100502 })\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.demand-backup-fs-18803.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.khmdXox7WY ++ mktemp + local LAST_ERR=/tmp/tmp.SE7czMXvaI + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-66f577db5f-678sd -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100502 })\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.demand-backup-fs-18803.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.khmdXox7WY Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-rs0-2.some-name-rs0.demand-backup-fs-18803.svc.cluster.local:27017,some-name-rs0-0.some-name-rs0.demand-backup-fs-18803.svc.cluster.local:27017,some-name-rs0-1.some-name-rs0.demand-backup-fs-18803.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("4b465ae8-51ac-4730-9c27-54ddccc6a427") } Percona Server for MongoDB server version: v8.0.8-3 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.SE7czMXvaI + rm /tmp/tmp.khmdXox7WY /tmp/tmp.SE7czMXvaI + return 0 + compare_mongo_cmd find myApp:myPass@some-name-rs0.demand-backup-fs-18803 -3rd .svc.cluster.local myApp test + local command=find + local uri=myApp:myPass@some-name-rs0.demand-backup-fs-18803 + local postfix=-3rd + local suffix=.svc.cluster.local + local database=myApp + local collection=test + local sort= + local 'full_command=db.test.find()' + [[ ! 
-z '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-05-19T14:44:34+0000] running db.test.find() in myApp + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0.demand-backup-fs-18803 mongodb .svc.cluster.local + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0.demand-backup-fs-18803 + local driver=mongodb + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.CEir5hvR54 +++ mktemp + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ local LAST_ERR=/tmp/tmp.JNegSLIugG ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.CEir5hvR54 ++ cat /tmp/tmp.JNegSLIugG ++ rm /tmp/tmp.CEir5hvR54 /tmp/tmp.JNegSLIugG ++ return 0 + local client_container=psmdb-client-66f577db5f-678sd + local mongo_flag= + [[ myApp:myPass@some-name-rs0.demand-backup-fs-18803 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-66f577db5f-678sd -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0.demand-backup-fs-18803.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.iTBBFh3htF ++ mktemp + local LAST_ERR=/tmp/tmp.YcKA582697 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-66f577db5f-678sd -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0.demand-backup-fs-18803.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.iTBBFh3htF + cat /tmp/tmp.YcKA582697 + rm /tmp/tmp.iTBBFh3htF /tmp/tmp.YcKA582697 + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1897/e2e-tests/demand-backup-fs/compare/find-3rd.json /tmp/tmp.NzbDqpY8QV/find-3rd + run_pitr_check backup-nfs-logical-pitr some-name -3rd + local backup=backup-nfs-logical-pitr + local cluster=some-name + local find_prefix=-3rd + wait_for_oplogs some-name + local cluster1=some-name ++ jq '.backups.snapshot[0].restoreTo' ++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ mktemp ++ local LAST_OUT=/tmp/tmp.py8e1Ge2Zu +++ mktemp ++ local LAST_ERR=/tmp/tmp.cPQn00lVGu ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.py8e1Ge2Zu ++ cat /tmp/tmp.cPQn00lVGu ++ rm /tmp/tmp.py8e1Ge2Zu /tmp/tmp.cPQn00lVGu ++ return 0 + local backup_last_write=1747665865 + local retries=0 ++ get_latest_oplog_chunk_ts some-name ++ local cluster=some-name +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.3y4vxvAeQS ++++ mktemp +++ local LAST_ERR=/tmp/tmp.8dzINLIbIP +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl 
exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.3y4vxvAeQS +++ cat /tmp/tmp.8dzINLIbIP +++ rm /tmp/tmp.3y4vxvAeQS /tmp/tmp.8dzINLIbIP +++ return 0 ++ echo null + local last_chunk=null + [[ null -gt 1747665865 ]] + [[ 0 -gt 30 ]] ++ get_latest_oplog_chunk_ts some-name ++ local cluster=some-name +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.IE5gRh7pkR ++++ mktemp +++ local LAST_ERR=/tmp/tmp.1RaefuaY0M +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.IE5gRh7pkR +++ cat /tmp/tmp.1RaefuaY0M +++ rm /tmp/tmp.IE5gRh7pkR /tmp/tmp.1RaefuaY0M +++ return 0 ++ echo null + last_chunk=null + retries=1 ++ format_date null ++ local timestamp=null +++ TZ=UTC +++ /usr/bin/date -d@null '+%Y-%m-%d %H:%M:%S' /usr/bin/date: invalid date ‘@null’ ++ echo ++ format_date 1747665865 ++ local timestamp=1747665865 +++ TZ=UTC +++ /usr/bin/date -d@1747665865 '+%Y-%m-%d %H:%M:%S' ++ echo 2025-05-19 14:44:25 + log 'Waiting for last oplog chunk () to be greater than last write (2025-05-19 14:44:25)' + set +o xtrace [2025-05-19T14:44:43+0000] Waiting for last oplog chunk () to be greater than last write (2025-05-19 14:44:25) + sleep 10 + [[ null -gt 1747665865 ]] + [[ 1 -gt 30 ]] ++ get_latest_oplog_chunk_ts some-name ++ local cluster=some-name +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json ++++ mktemp +++ local LAST_OUT=/tmp/tmp.s58miUk114 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.YGHlp92iYM +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.s58miUk114 +++ cat /tmp/tmp.YGHlp92iYM +++ rm /tmp/tmp.s58miUk114 /tmp/tmp.YGHlp92iYM +++ return 0 ++ echo null + last_chunk=null + retries=2 ++ format_date null ++ local timestamp=null +++ TZ=UTC +++ /usr/bin/date -d@null '+%Y-%m-%d %H:%M:%S' /usr/bin/date: invalid date ‘@null’ ++ echo ++ format_date 1747665865 ++ local timestamp=1747665865 +++ TZ=UTC +++ /usr/bin/date -d@1747665865 '+%Y-%m-%d %H:%M:%S' ++ echo 2025-05-19 14:44:25 + log 'Waiting for last oplog chunk () to be greater than last write (2025-05-19 14:44:25)' + set +o xtrace [2025-05-19T14:44:56+0000] Waiting for last oplog chunk () to be greater than last write (2025-05-19 14:44:25) + sleep 10 + [[ null -gt 1747665865 ]] + [[ 2 -gt 30 ]] ++ get_latest_oplog_chunk_ts some-name ++ local cluster=some-name +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.MutkW4iSST ++++ mktemp +++ local LAST_ERR=/tmp/tmp.3S1zmUmTMe +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.MutkW4iSST +++ cat /tmp/tmp.3S1zmUmTMe +++ rm /tmp/tmp.MutkW4iSST /tmp/tmp.3S1zmUmTMe +++ return 0 ++ echo null + last_chunk=null + retries=3 ++ 
format_date null ++ local timestamp=null +++ TZ=UTC +++ /usr/bin/date -d@null '+%Y-%m-%d %H:%M:%S' /usr/bin/date: invalid date ‘@null’ ++ echo ++ format_date 1747665865 ++ local timestamp=1747665865 +++ TZ=UTC +++ /usr/bin/date -d@1747665865 '+%Y-%m-%d %H:%M:%S' ++ echo 2025-05-19 14:44:25 + log 'Waiting for last oplog chunk () to be greater than last write (2025-05-19 14:44:25)' + set +o xtrace [2025-05-19T14:45:09+0000] Waiting for last oplog chunk () to be greater than last write (2025-05-19 14:44:25) + sleep 10 + [[ null -gt 1747665865 ]] + [[ 3 -gt 30 ]] ++ get_latest_oplog_chunk_ts some-name ++ local cluster=some-name +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json ++++ mktemp +++ local LAST_OUT=/tmp/tmp.JwreMIdK3v ++++ mktemp +++ local LAST_ERR=/tmp/tmp.nHfBnNA5rV +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.JwreMIdK3v +++ cat /tmp/tmp.nHfBnNA5rV +++ rm /tmp/tmp.JwreMIdK3v /tmp/tmp.nHfBnNA5rV +++ return 0 ++ echo null + last_chunk=null + retries=4 ++ format_date null ++ local timestamp=null +++ TZ=UTC +++ /usr/bin/date -d@null '+%Y-%m-%d %H:%M:%S' /usr/bin/date: invalid date ‘@null’ ++ echo ++ format_date 1747665865 ++ local timestamp=1747665865 +++ TZ=UTC +++ /usr/bin/date -d@1747665865 '+%Y-%m-%d %H:%M:%S' ++ echo 2025-05-19 14:44:25 + log 'Waiting for last oplog chunk () to be greater than last write (2025-05-19 14:44:25)' + set +o xtrace [2025-05-19T14:45:21+0000] Waiting for last oplog chunk () to be greater than last write (2025-05-19 14:44:25) + sleep 10 + [[ null -gt 1747665865 ]] + [[ 4 -gt 30 ]] ++ get_latest_oplog_chunk_ts some-name ++ local cluster=some-name +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json ++++ mktemp +++ local LAST_OUT=/tmp/tmp.9qPH7KzGOh ++++ mktemp +++ local LAST_ERR=/tmp/tmp.uPEMePkczN +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.9qPH7KzGOh +++ cat /tmp/tmp.uPEMePkczN +++ rm /tmp/tmp.9qPH7KzGOh /tmp/tmp.uPEMePkczN +++ return 0 ++ echo null + last_chunk=null + retries=5 ++ format_date null ++ local timestamp=null +++ TZ=UTC +++ /usr/bin/date -d@null '+%Y-%m-%d %H:%M:%S' /usr/bin/date: invalid date ‘@null’ ++ echo ++ format_date 1747665865 ++ local timestamp=1747665865 +++ TZ=UTC +++ /usr/bin/date -d@1747665865 '+%Y-%m-%d %H:%M:%S' ++ echo 2025-05-19 14:44:25 + log 'Waiting for last oplog chunk () to be greater than last write (2025-05-19 14:44:25)' + set +o xtrace [2025-05-19T14:45:34+0000] Waiting for last oplog chunk () to be greater than last write (2025-05-19 14:44:25) + sleep 10 + [[ null -gt 1747665865 ]] + [[ 5 -gt 30 ]] ++ get_latest_oplog_chunk_ts some-name ++ local cluster=some-name +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.L8SUj6JW5w ++++ mktemp +++ local LAST_ERR=/tmp/tmp.nuNAc1Ugtl +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status 
-o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.L8SUj6JW5w +++ cat /tmp/tmp.nuNAc1Ugtl +++ rm /tmp/tmp.L8SUj6JW5w /tmp/tmp.nuNAc1Ugtl +++ return 0 ++ echo 1747665938 + last_chunk=1747665938 + retries=6 ++ format_date 1747665938 ++ local timestamp=1747665938 +++ TZ=UTC +++ /usr/bin/date -d@1747665938 '+%Y-%m-%d %H:%M:%S' ++ echo 2025-05-19 14:45:38 ++ format_date 1747665865 ++ local timestamp=1747665865 +++ TZ=UTC +++ /usr/bin/date -d@1747665865 '+%Y-%m-%d %H:%M:%S' ++ echo 2025-05-19 14:44:25 + log 'Waiting for last oplog chunk (2025-05-19 14:45:38) to be greater than last write (2025-05-19 14:44:25)' + set +o xtrace [2025-05-19T14:45:47+0000] Waiting for last oplog chunk (2025-05-19 14:45:38) to be greater than last write (2025-05-19 14:44:25) + sleep 10 + [[ 1747665938 -gt 1747665865 ]] +++ get_latest_oplog_chunk_ts some-name +++ local cluster=some-name ++++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json ++++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.PjNlWw5Vpw +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.q7jJ12C17D ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ set +e ++++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.PjNlWw5Vpw ++++ cat /tmp/tmp.q7jJ12C17D ++++ rm /tmp/tmp.PjNlWw5Vpw /tmp/tmp.q7jJ12C17D ++++ return 0 +++ echo 1747665938 ++ format_date 1747665938 ++ local timestamp=1747665938 +++ TZ=UTC +++ /usr/bin/date -d@1747665938 '+%Y-%m-%d %H:%M:%S' ++ echo 2025-05-19 14:45:38 + local 'target_time=2025-05-19 14:45:38' + log 'dropping test collection' + set +o xtrace [2025-05-19T14:45:59+0000] dropping test collection + run_mongo 'use myApp\n db.test.drop()' myApp:myPass@some-name-rs0.demand-backup-fs-18803 + local 'command=use myApp\n db.test.drop()' + local uri=myApp:myPass@some-name-rs0.demand-backup-fs-18803 + local driver=mongodb+srv + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.mjl0oLQZdE +++ mktemp ++ local LAST_ERR=/tmp/tmp.pOahAvdRui ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.mjl0oLQZdE ++ cat /tmp/tmp.pOahAvdRui ++ rm /tmp/tmp.mjl0oLQZdE /tmp/tmp.pOahAvdRui ++ return 0 + local client_container=psmdb-client-66f577db5f-678sd + local mongo_flag= + [[ myApp:myPass@some-name-rs0.demand-backup-fs-18803 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-66f577db5f-678sd -- bash -c 'printf '\''use myApp\n db.test.drop()\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.demand-backup-fs-18803.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.VB7J62QZhO ++ mktemp + local LAST_ERR=/tmp/tmp.Jm4c5VaTLr + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-66f577db5f-678sd -- bash -c 'printf '\''use myApp\n db.test.drop()\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.demand-backup-fs-18803.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.VB7J62QZhO Percona Server for MongoDB shell version 
v4.4.29-28 connecting to: mongodb://some-name-rs0-2.some-name-rs0.demand-backup-fs-18803.svc.cluster.local:27017,some-name-rs0-0.some-name-rs0.demand-backup-fs-18803.svc.cluster.local:27017,some-name-rs0-1.some-name-rs0.demand-backup-fs-18803.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("afbdfc04-7c4b-41be-a12c-fceafeb7d762") } Percona Server for MongoDB server version: v8.0.8-3 WARNING: shell and server versions do not match switched to db myApp true bye + cat /tmp/tmp.Jm4c5VaTLr + rm /tmp/tmp.VB7J62QZhO /tmp/tmp.Jm4c5VaTLr + return 0 + log 'checking pitr... backup: backup-nfs-logical-pitr target: 2025-05-19 14:45:38' + set +o xtrace [2025-05-19T14:46:02+0000] checking pitr... backup: backup-nfs-logical-pitr target: 2025-05-19 14:45:38 + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1897/e2e-tests/demand-backup-fs/conf/pitr.yml + yq eval '.metadata.name = "restore-backup-nfs-logical-pitr"' + yq eval '.spec.backupName = "backup-nfs-logical-pitr"' + yq eval '.spec.pitr.date = "2025-05-19 14:45:38"' + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.G6pxynNkNs ++ mktemp + local LAST_ERR=/tmp/tmp.Mhes0TUrTd + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.G6pxynNkNs perconaservermongodbrestore.psmdb.percona.com/restore-backup-nfs-logical-pitr created + cat /tmp/tmp.Mhes0TUrTd + rm /tmp/tmp.G6pxynNkNs /tmp/tmp.Mhes0TUrTd + return 0 + wait_restore backup-nfs-logical-pitr some-name + local backup_name=backup-nfs-logical-pitr + local cluster_name=some-name + local target_state=ready + local wait_cluster_consistency=1 + local wait_time=1780 + local ok_if_ready=0 + set +o xtrace waiting psmdb-restore/restore-backup-nfs-logical-pitr to reach ready state..........OK + '[' 1 -eq 1 ']' + wait_cluster_consistency some-name + local cluster_name=some-name + local wait_time=32 + retry=0 + sleep 7 + echo -n 'waiting for cluster readyness' waiting for cluster readyness++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.sPKQ8EcGoD +++ mktemp ++ local LAST_ERR=/tmp/tmp.GNDvNkj5B2 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.sPKQ8EcGoD ++ cat /tmp/tmp.GNDvNkj5B2 ++ rm /tmp/tmp.sPKQ8EcGoD /tmp/tmp.GNDvNkj5B2 ++ return 0 + [[ ready == \r\e\a\d\y ]] + echo + compare_mongo_cmd find myApp:myPass@some-name-rs0.demand-backup-fs-18803 -3rd .svc.cluster.local myApp test + local command=find + local uri=myApp:myPass@some-name-rs0.demand-backup-fs-18803 + local postfix=-3rd + local suffix=.svc.cluster.local + local database=myApp + local collection=test + local sort= + local 'full_command=db.test.find()' + [[ ! 
-z '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-05-19T14:46:33+0000] running db.test.find() in myApp + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0.demand-backup-fs-18803 mongodb .svc.cluster.local + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0.demand-backup-fs-18803 + local driver=mongodb + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.FWb1wwM8R0 +++ mktemp ++ local LAST_ERR=/tmp/tmp.WcF8HnhBGl ++ local exit_status=0 ++ local timeout=4 + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.FWb1wwM8R0 ++ cat /tmp/tmp.WcF8HnhBGl ++ rm /tmp/tmp.FWb1wwM8R0 /tmp/tmp.WcF8HnhBGl ++ return 0 + local client_container=psmdb-client-66f577db5f-678sd + local mongo_flag= + [[ myApp:myPass@some-name-rs0.demand-backup-fs-18803 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-66f577db5f-678sd -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0.demand-backup-fs-18803.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.FifgP8TG6a ++ mktemp + local LAST_ERR=/tmp/tmp.ieEpXFXi0Z + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-66f577db5f-678sd -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0.demand-backup-fs-18803.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.FifgP8TG6a + cat /tmp/tmp.ieEpXFXi0Z + rm /tmp/tmp.FifgP8TG6a /tmp/tmp.ieEpXFXi0Z + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1897/e2e-tests/demand-backup-fs/compare/find-3rd.json /tmp/tmp.NzbDqpY8QV/find-3rd + desc 'CASE 3: Physical backup and restore' + set +o xtrace ----------------------------------------------------------------------------------- CASE 3: Physical backup and restore ----------------------------------------------------------------------------------- + backup_name=backup-nfs-physical + run_backup nfs backup-nfs-physical physical + local storage=nfs + local backup_name=backup-nfs-physical + local type=physical + desc 'run backup backup-nfs-physical' + set +o xtrace ----------------------------------------------------------------------------------- run backup backup-nfs-physical ----------------------------------------------------------------------------------- + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.K8rg7wRKCw ++ mktemp + local LAST_ERR=/tmp/tmp.ZlXpfwVNSl + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + yq eval '.metadata.name = "backup-nfs-physical" | .spec.storageName = "nfs" | .spec.type = "physical"' /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1897/e2e-tests/demand-backup-fs/conf/backup-nfs.yml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.K8rg7wRKCw 
perconaservermongodbbackup.psmdb.percona.com/backup-nfs-physical created + cat /tmp/tmp.ZlXpfwVNSl + rm /tmp/tmp.K8rg7wRKCw /tmp/tmp.ZlXpfwVNSl + return 0 + wait_backup backup-nfs-physical + local backup_name=backup-nfs-physical + local target_state=ready + set +o xtrace waiting for backup-nfs-physical to reach ready state..... + run_recovery_check backup-nfs-physical some-name -4th -3rd + local backup=backup-nfs-physical + local cluster=some-name + local find_prefix_before=-4th + local find_prefix_after=-3rd + write_data 100501 -4th + local x=100501 + local find_prefix=-4th + run_mongo 'use myApp\n db.test.insert({ x: 100501 })' myApp:myPass@some-name-rs0.demand-backup-fs-18803 + local 'command=use myApp\n db.test.insert({ x: 100501 })' + local uri=myApp:myPass@some-name-rs0.demand-backup-fs-18803 + local driver=mongodb+srv + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.smLyghYLsT +++ mktemp ++ local LAST_ERR=/tmp/tmp.Lqak5WdEBn ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.smLyghYLsT ++ cat /tmp/tmp.Lqak5WdEBn ++ rm /tmp/tmp.smLyghYLsT /tmp/tmp.Lqak5WdEBn ++ return 0 + local client_container=psmdb-client-66f577db5f-678sd + local mongo_flag= + [[ myApp:myPass@some-name-rs0.demand-backup-fs-18803 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-66f577db5f-678sd -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100501 })\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.demand-backup-fs-18803.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.Z86Y8nJGds ++ mktemp + local LAST_ERR=/tmp/tmp.6pmbjqOmis + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-66f577db5f-678sd -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100501 })\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.demand-backup-fs-18803.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Z86Y8nJGds Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-rs0-2.some-name-rs0.demand-backup-fs-18803.svc.cluster.local:27017,some-name-rs0-0.some-name-rs0.demand-backup-fs-18803.svc.cluster.local:27017,some-name-rs0-1.some-name-rs0.demand-backup-fs-18803.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("85e14f25-d249-4a65-a491-0c84f16d58a1") } Percona Server for MongoDB server version: v8.0.8-3 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.6pmbjqOmis + rm /tmp/tmp.Z86Y8nJGds /tmp/tmp.6pmbjqOmis + return 0 + compare_mongo_cmd find myApp:myPass@some-name-rs0.demand-backup-fs-18803 -4th .svc.cluster.local myApp test + local command=find + local uri=myApp:myPass@some-name-rs0.demand-backup-fs-18803 + local postfix=-4th + local suffix=.svc.cluster.local + local database=myApp + local collection=test + local sort= + local 'full_command=db.test.find()' + [[ ! 
-z '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-05-19T14:46:57+0000] running db.test.find() in myApp + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0.demand-backup-fs-18803 mongodb .svc.cluster.local + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0.demand-backup-fs-18803 + local driver=mongodb + local suffix=.svc.cluster.local + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.2ErU9AdyS5 +++ mktemp ++ local LAST_ERR=/tmp/tmp.mJTLH8D3eQ ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.2ErU9AdyS5 ++ cat /tmp/tmp.mJTLH8D3eQ ++ rm /tmp/tmp.2ErU9AdyS5 /tmp/tmp.mJTLH8D3eQ ++ return 0 + local client_container=psmdb-client-66f577db5f-678sd + local mongo_flag= + [[ myApp:myPass@some-name-rs0.demand-backup-fs-18803 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-66f577db5f-678sd -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0.demand-backup-fs-18803.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.iKPnHyzv5k ++ mktemp + local LAST_ERR=/tmp/tmp.XEKF4krjqe + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-66f577db5f-678sd -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0.demand-backup-fs-18803.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.iKPnHyzv5k + cat /tmp/tmp.XEKF4krjqe + rm /tmp/tmp.iKPnHyzv5k /tmp/tmp.XEKF4krjqe + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1897/e2e-tests/demand-backup-fs/compare/find-4th.json /tmp/tmp.NzbDqpY8QV/find-4th + run_restore backup-nfs-physical + local backup_name=backup-nfs-physical + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1897/e2e-tests/demand-backup-fs/conf/restore.yml + /usr/bin/sed -e 's/name:/name: restore-backup-nfs-physical/' + kubectl_bin apply -f - ++ mktemp + /usr/bin/sed -e 's/backupName:/backupName: backup-nfs-physical/' + local LAST_OUT=/tmp/tmp.EE1YvvHqx9 ++ mktemp + local LAST_ERR=/tmp/tmp.bRp23psZ7o + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.EE1YvvHqx9 perconaservermongodbrestore.psmdb.percona.com/restore-backup-nfs-physical created + cat /tmp/tmp.bRp23psZ7o + rm /tmp/tmp.EE1YvvHqx9 /tmp/tmp.bRp23psZ7o + return 0 + wait_restore backup-nfs-physical some-name + local backup_name=backup-nfs-physical + local cluster_name=some-name + local target_state=ready + local wait_cluster_consistency=1 + local wait_time=1780 + local ok_if_ready=0 + set +o xtrace waiting psmdb-restore/restore-backup-nfs-physical to reach ready state........................................................................................................OK + '[' 1 -eq 1 ']' 
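After the physical restore the test does not only wait for the restore object: it also polls the PSMDB custom resource until the whole cluster reports ready, and the polling that follows shows the state cycling through initializing and error before settling back to ready. A minimal sketch of that readiness loop, reconstructed from the trace (plain kubectl stands in for the kubectl_bin wrapper, and the timeout message on the error path is an assumption):

wait_cluster_consistency() {
    local cluster_name=$1
    local wait_time=32   # maximum number of attempts seen in the trace
    local retry=0
    sleep 7
    echo -n 'waiting for cluster readyness'
    # Poll the custom resource status until the operator reports "ready".
    until [[ $(kubectl get psmdb "${cluster_name}" -o 'jsonpath={.status.state}') == "ready" ]]; do
        let retry+=1
        if [ "${retry}" -ge "${wait_time}" ]; then
            echo "cluster ${cluster_name} did not reach ready state in time"
            return 1
        fi
        echo -n .
        sleep 10
    done
    echo
}

In this run the loop needed 14 retries (states initializing and error) before the cluster state returned to ready.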
+ wait_cluster_consistency some-name + local cluster_name=some-name + local wait_time=32 + retry=0 + sleep 7 + echo -n 'waiting for cluster readyness' waiting for cluster readyness++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Qb3EioLWYL +++ mktemp ++ local LAST_ERR=/tmp/tmp.deBVhcqWvZ ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Qb3EioLWYL ++ cat /tmp/tmp.deBVhcqWvZ ++ rm /tmp/tmp.Qb3EioLWYL /tmp/tmp.deBVhcqWvZ ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 1 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.qVkB7v8BZx +++ mktemp ++ local LAST_ERR=/tmp/tmp.Y1O2f4SeCt ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.qVkB7v8BZx ++ cat /tmp/tmp.Y1O2f4SeCt ++ rm /tmp/tmp.qVkB7v8BZx /tmp/tmp.Y1O2f4SeCt ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 2 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ZVctaGyjl2 +++ mktemp ++ local LAST_ERR=/tmp/tmp.O22XjfP53A ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.ZVctaGyjl2 ++ cat /tmp/tmp.O22XjfP53A ++ rm /tmp/tmp.ZVctaGyjl2 /tmp/tmp.O22XjfP53A ++ return 0 + [[ error == \r\e\a\d\y ]] + let retry+=1 + '[' 3 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.8hkRzZt1ye +++ mktemp ++ local LAST_ERR=/tmp/tmp.5oe52vwdGL ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.8hkRzZt1ye ++ cat /tmp/tmp.5oe52vwdGL ++ rm /tmp/tmp.8hkRzZt1ye /tmp/tmp.5oe52vwdGL ++ return 0 + [[ error == \r\e\a\d\y ]] + let retry+=1 + '[' 4 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.8USH80bPhh +++ mktemp ++ local LAST_ERR=/tmp/tmp.wusuXwNfL7 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.8USH80bPhh ++ cat /tmp/tmp.wusuXwNfL7 ++ rm /tmp/tmp.8USH80bPhh /tmp/tmp.wusuXwNfL7 ++ return 0 + [[ error == \r\e\a\d\y ]] + let retry+=1 + '[' 5 -ge 32 ']' + echo -n . 
.+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.clqwmNQLFK +++ mktemp ++ local LAST_ERR=/tmp/tmp.uVunGJQwnx ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.clqwmNQLFK ++ cat /tmp/tmp.uVunGJQwnx ++ rm /tmp/tmp.clqwmNQLFK /tmp/tmp.uVunGJQwnx ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 6 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.U29zDsSyi0 +++ mktemp ++ local LAST_ERR=/tmp/tmp.pMC6ubvUQX ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.U29zDsSyi0 ++ cat /tmp/tmp.pMC6ubvUQX ++ rm /tmp/tmp.U29zDsSyi0 /tmp/tmp.pMC6ubvUQX ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 7 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.MTq5E3sSUr +++ mktemp ++ local LAST_ERR=/tmp/tmp.bJJucIYGja ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.MTq5E3sSUr ++ cat /tmp/tmp.bJJucIYGja ++ rm /tmp/tmp.MTq5E3sSUr /tmp/tmp.bJJucIYGja ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 8 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.qAyrJoME2N +++ mktemp ++ local LAST_ERR=/tmp/tmp.JVzOtKqbQ9 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.qAyrJoME2N ++ cat /tmp/tmp.JVzOtKqbQ9 ++ rm /tmp/tmp.qAyrJoME2N /tmp/tmp.JVzOtKqbQ9 ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 9 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.qd7aNkFV1W +++ mktemp ++ local LAST_ERR=/tmp/tmp.8h94AIerOH ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.qd7aNkFV1W ++ cat /tmp/tmp.8h94AIerOH ++ rm /tmp/tmp.qd7aNkFV1W /tmp/tmp.8h94AIerOH ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 10 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.XGcv24OrSN +++ mktemp ++ local LAST_ERR=/tmp/tmp.M6PGPNESuB ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.XGcv24OrSN ++ cat /tmp/tmp.M6PGPNESuB ++ rm /tmp/tmp.XGcv24OrSN /tmp/tmp.M6PGPNESuB ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 11 -ge 32 ']' + echo -n . 
.+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.5gIUbycwbu +++ mktemp ++ local LAST_ERR=/tmp/tmp.fOeZhTIvGS ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.5gIUbycwbu ++ cat /tmp/tmp.fOeZhTIvGS ++ rm /tmp/tmp.5gIUbycwbu /tmp/tmp.fOeZhTIvGS ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 12 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.T9p9KVOiO2 +++ mktemp ++ local LAST_ERR=/tmp/tmp.N6jHj1Maa3 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.T9p9KVOiO2 ++ cat /tmp/tmp.N6jHj1Maa3 ++ rm /tmp/tmp.T9p9KVOiO2 /tmp/tmp.N6jHj1Maa3 ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 13 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.J6VBueBAAF +++ mktemp ++ local LAST_ERR=/tmp/tmp.CIned5eM8Z ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.J6VBueBAAF ++ cat /tmp/tmp.CIned5eM8Z ++ rm /tmp/tmp.J6VBueBAAF /tmp/tmp.CIned5eM8Z ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 14 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.or8oWgmrIF +++ mktemp ++ local LAST_ERR=/tmp/tmp.hcanMrFNqS ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.or8oWgmrIF ++ cat /tmp/tmp.hcanMrFNqS ++ rm /tmp/tmp.or8oWgmrIF /tmp/tmp.hcanMrFNqS ++ return 0 + [[ ready == \r\e\a\d\y ]] + echo + compare_mongo_cmd find myApp:myPass@some-name-rs0.demand-backup-fs-18803 -3rd .svc.cluster.local myApp test + local command=find + local uri=myApp:myPass@some-name-rs0.demand-backup-fs-18803 + local postfix=-3rd + local suffix=.svc.cluster.local + local database=myApp + local collection=test + local sort= + local 'full_command=db.test.find()' + [[ ! 
-z '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-05-19T14:53:51+0000] running db.test.find() in myApp + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0.demand-backup-fs-18803 mongodb .svc.cluster.local + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0.demand-backup-fs-18803 + local driver=mongodb + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' +++ mktemp ++ local LAST_OUT=/tmp/tmp.07qNxQA9yn +++ mktemp ++ local LAST_ERR=/tmp/tmp.3gztQhOEZM ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.07qNxQA9yn ++ cat /tmp/tmp.3gztQhOEZM ++ rm /tmp/tmp.07qNxQA9yn /tmp/tmp.3gztQhOEZM ++ return 0 + local client_container=psmdb-client-66f577db5f-678sd + local mongo_flag= + [[ myApp:myPass@some-name-rs0.demand-backup-fs-18803 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-66f577db5f-678sd -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0.demand-backup-fs-18803.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.KzZYYXoKG4 ++ mktemp + local LAST_ERR=/tmp/tmp.vfzXWrWJLQ + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-66f577db5f-678sd -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0.demand-backup-fs-18803.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.KzZYYXoKG4 + cat /tmp/tmp.vfzXWrWJLQ + rm /tmp/tmp.KzZYYXoKG4 /tmp/tmp.vfzXWrWJLQ + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1897/e2e-tests/demand-backup-fs/compare/find-3rd.json /tmp/tmp.NzbDqpY8QV/find-3rd + desc 'CASE 4: Physical backup and PiTR' + set +o xtrace ----------------------------------------------------------------------------------- CASE 4: Physical backup and PiTR ----------------------------------------------------------------------------------- + backup_name=backup-nfs-physical-pitr + run_backup nfs backup-nfs-physical-pitr physical + local storage=nfs + local backup_name=backup-nfs-physical-pitr + local type=physical + desc 'run backup backup-nfs-physical-pitr' + set +o xtrace ----------------------------------------------------------------------------------- run backup backup-nfs-physical-pitr ----------------------------------------------------------------------------------- + kubectl_bin apply -f - + yq eval '.metadata.name = "backup-nfs-physical-pitr" | .spec.storageName = "nfs" | .spec.type = "physical"' /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1897/e2e-tests/demand-backup-fs/conf/backup-nfs.yml ++ mktemp + local LAST_OUT=/tmp/tmp.9MxP3pCsPI ++ mktemp + local LAST_ERR=/tmp/tmp.7sBCGMqmTP + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat 
/tmp/tmp.9MxP3pCsPI perconaservermongodbbackup.psmdb.percona.com/backup-nfs-physical-pitr created + cat /tmp/tmp.7sBCGMqmTP + rm /tmp/tmp.9MxP3pCsPI /tmp/tmp.7sBCGMqmTP + return 0 + wait_backup backup-nfs-physical-pitr + local backup_name=backup-nfs-physical-pitr + local target_state=ready + set +o xtrace waiting for backup-nfs-physical-pitr to reach ready state..... + write_data 100503 -5th + local x=100503 + local find_prefix=-5th + run_mongo 'use myApp\n db.test.insert({ x: 100503 })' myApp:myPass@some-name-rs0.demand-backup-fs-18803 + local 'command=use myApp\n db.test.insert({ x: 100503 })' + local uri=myApp:myPass@some-name-rs0.demand-backup-fs-18803 + local driver=mongodb+srv + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.IHgZvm6woY +++ mktemp ++ local LAST_ERR=/tmp/tmp.H2D7HveWLN ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.IHgZvm6woY ++ cat /tmp/tmp.H2D7HveWLN ++ rm /tmp/tmp.IHgZvm6woY /tmp/tmp.H2D7HveWLN ++ return 0 + local client_container=psmdb-client-66f577db5f-678sd + local mongo_flag= + [[ myApp:myPass@some-name-rs0.demand-backup-fs-18803 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-66f577db5f-678sd -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100503 })\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.demand-backup-fs-18803.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.DDUz36NXd6 ++ mktemp + local LAST_ERR=/tmp/tmp.yGpSNdsBCz + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-66f577db5f-678sd -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100503 })\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.demand-backup-fs-18803.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.DDUz36NXd6 Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-rs0-2.some-name-rs0.demand-backup-fs-18803.svc.cluster.local:27017,some-name-rs0-0.some-name-rs0.demand-backup-fs-18803.svc.cluster.local:27017,some-name-rs0-1.some-name-rs0.demand-backup-fs-18803.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("6f113222-a797-4f43-ae1e-3cb164089333") } Percona Server for MongoDB server version: v8.0.8-3 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.yGpSNdsBCz + rm /tmp/tmp.DDUz36NXd6 /tmp/tmp.yGpSNdsBCz + return 0 + compare_mongo_cmd find myApp:myPass@some-name-rs0.demand-backup-fs-18803 -5th .svc.cluster.local myApp test + local command=find + local uri=myApp:myPass@some-name-rs0.demand-backup-fs-18803 + local postfix=-5th + local suffix=.svc.cluster.local + local database=myApp + local collection=test + local sort= + local 'full_command=db.test.find()' + [[ ! 
-z '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-05-19T14:54:13+0000] running db.test.find() in myApp + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0.demand-backup-fs-18803 mongodb .svc.cluster.local + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0.demand-backup-fs-18803 + local driver=mongodb + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.rjjyX5IINZ +++ mktemp ++ local LAST_ERR=/tmp/tmp.p2dBGpu7na ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.rjjyX5IINZ ++ cat /tmp/tmp.p2dBGpu7na ++ rm /tmp/tmp.rjjyX5IINZ /tmp/tmp.p2dBGpu7na ++ return 0 + local client_container=psmdb-client-66f577db5f-678sd + local mongo_flag= + [[ myApp:myPass@some-name-rs0.demand-backup-fs-18803 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-66f577db5f-678sd -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0.demand-backup-fs-18803.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.45x43wDQFY ++ mktemp + local LAST_ERR=/tmp/tmp.qq6wasRuqJ + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-66f577db5f-678sd -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0.demand-backup-fs-18803.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.45x43wDQFY + cat /tmp/tmp.qq6wasRuqJ + rm /tmp/tmp.45x43wDQFY /tmp/tmp.qq6wasRuqJ + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1897/e2e-tests/demand-backup-fs/compare/find-5th.json /tmp/tmp.NzbDqpY8QV/find-5th + run_pitr_check backup-nfs-physical-pitr some-name -5th + local backup=backup-nfs-physical-pitr + local cluster=some-name + local find_prefix=-5th + wait_for_oplogs some-name + local cluster1=some-name ++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json ++ jq '.backups.snapshot[0].restoreTo' +++ mktemp ++ local LAST_OUT=/tmp/tmp.AXu1XGdgGf +++ mktemp ++ local LAST_ERR=/tmp/tmp.fgSMZhZr4I ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.AXu1XGdgGf ++ cat /tmp/tmp.fgSMZhZr4I ++ rm /tmp/tmp.AXu1XGdgGf /tmp/tmp.fgSMZhZr4I ++ return 0 + local backup_last_write=1747666441 + local retries=0 ++ get_latest_oplog_chunk_ts some-name ++ local cluster=some-name +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.svdSpHzqvr ++++ mktemp +++ local LAST_ERR=/tmp/tmp.6ktXfCaG3w +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl 
exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.svdSpHzqvr +++ cat /tmp/tmp.6ktXfCaG3w +++ rm /tmp/tmp.svdSpHzqvr /tmp/tmp.6ktXfCaG3w +++ return 0 ++ echo 1747666031 + local last_chunk=1747666031 + [[ 1747666031 -gt 1747666441 ]] + [[ 0 -gt 30 ]] ++ get_latest_oplog_chunk_ts some-name ++ local cluster=some-name +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json ++++ mktemp +++ local LAST_OUT=/tmp/tmp.49Krs9ObaG ++++ mktemp +++ local LAST_ERR=/tmp/tmp.cYHzBuGwO3 +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.49Krs9ObaG +++ cat /tmp/tmp.cYHzBuGwO3 +++ rm /tmp/tmp.49Krs9ObaG /tmp/tmp.cYHzBuGwO3 +++ return 0 ++ echo 1747666031 + last_chunk=1747666031 + retries=1 ++ format_date 1747666031 ++ local timestamp=1747666031 +++ TZ=UTC +++ /usr/bin/date -d@1747666031 '+%Y-%m-%d %H:%M:%S' ++ echo 2025-05-19 14:47:11 ++ format_date 1747666441 ++ local timestamp=1747666441 +++ TZ=UTC +++ /usr/bin/date -d@1747666441 '+%Y-%m-%d %H:%M:%S' ++ echo 2025-05-19 14:54:01 + log 'Waiting for last oplog chunk (2025-05-19 14:47:11) to be greater than last write (2025-05-19 14:54:01)' + set +o xtrace [2025-05-19T14:54:24+0000] Waiting for last oplog chunk (2025-05-19 14:47:11) to be greater than last write (2025-05-19 14:54:01) + sleep 10 + [[ 1747666031 -gt 1747666441 ]] + [[ 1 -gt 30 ]] ++ get_latest_oplog_chunk_ts some-name ++ local cluster=some-name +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json ++++ mktemp +++ local LAST_OUT=/tmp/tmp.MuuTnSNBz9 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.SXxBNBzfZq +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.MuuTnSNBz9 +++ cat /tmp/tmp.SXxBNBzfZq +++ rm /tmp/tmp.MuuTnSNBz9 /tmp/tmp.SXxBNBzfZq +++ return 0 ++ echo 1747666031 + last_chunk=1747666031 + retries=2 ++ format_date 1747666031 ++ local timestamp=1747666031 +++ TZ=UTC +++ /usr/bin/date -d@1747666031 '+%Y-%m-%d %H:%M:%S' ++ echo 2025-05-19 14:47:11 ++ format_date 1747666441 ++ local timestamp=1747666441 +++ TZ=UTC +++ /usr/bin/date -d@1747666441 '+%Y-%m-%d %H:%M:%S' ++ echo 2025-05-19 14:54:01 + log 'Waiting for last oplog chunk (2025-05-19 14:47:11) to be greater than last write (2025-05-19 14:54:01)' + set +o xtrace [2025-05-19T14:54:36+0000] Waiting for last oplog chunk (2025-05-19 14:47:11) to be greater than last write (2025-05-19 14:54:01) + sleep 10 + [[ 1747666031 -gt 1747666441 ]] + [[ 2 -gt 30 ]] ++ get_latest_oplog_chunk_ts some-name ++ local cluster=some-name +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json ++++ mktemp +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' +++ local LAST_OUT=/tmp/tmp.D60Ly2ETeC ++++ mktemp +++ local LAST_ERR=/tmp/tmp.EFgB9klvtk +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.D60Ly2ETeC +++ 
cat /tmp/tmp.EFgB9klvtk +++ rm /tmp/tmp.D60Ly2ETeC /tmp/tmp.EFgB9klvtk +++ return 0 ++ echo 1747666031 + last_chunk=1747666031 + retries=3 ++ format_date 1747666031 ++ local timestamp=1747666031 +++ TZ=UTC +++ /usr/bin/date -d@1747666031 '+%Y-%m-%d %H:%M:%S' ++ echo 2025-05-19 14:47:11 ++ format_date 1747666441 ++ local timestamp=1747666441 +++ TZ=UTC +++ /usr/bin/date -d@1747666441 '+%Y-%m-%d %H:%M:%S' ++ echo 2025-05-19 14:54:01 + log 'Waiting for last oplog chunk (2025-05-19 14:47:11) to be greater than last write (2025-05-19 14:54:01)' + set +o xtrace [2025-05-19T14:54:49+0000] Waiting for last oplog chunk (2025-05-19 14:47:11) to be greater than last write (2025-05-19 14:54:01) + sleep 10 + [[ 1747666031 -gt 1747666441 ]] + [[ 3 -gt 30 ]] ++ get_latest_oplog_chunk_ts some-name ++ local cluster=some-name +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json ++++ mktemp +++ local LAST_OUT=/tmp/tmp.s6p8YkFgjd ++++ mktemp +++ local LAST_ERR=/tmp/tmp.Ye447LIREF +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.s6p8YkFgjd +++ cat /tmp/tmp.Ye447LIREF +++ rm /tmp/tmp.s6p8YkFgjd /tmp/tmp.Ye447LIREF +++ return 0 ++ echo 1747666031 + last_chunk=1747666031 + retries=4 ++ format_date 1747666031 ++ local timestamp=1747666031 +++ TZ=UTC +++ /usr/bin/date -d@1747666031 '+%Y-%m-%d %H:%M:%S' ++ echo 2025-05-19 14:47:11 ++ format_date 1747666441 ++ local timestamp=1747666441 +++ TZ=UTC +++ /usr/bin/date -d@1747666441 '+%Y-%m-%d %H:%M:%S' ++ echo 2025-05-19 14:54:01 + log 'Waiting for last oplog chunk (2025-05-19 14:47:11) to be greater than last write (2025-05-19 14:54:01)' + set +o xtrace [2025-05-19T14:55:02+0000] Waiting for last oplog chunk (2025-05-19 14:47:11) to be greater than last write (2025-05-19 14:54:01) + sleep 10 + [[ 1747666031 -gt 1747666441 ]] + [[ 4 -gt 30 ]] ++ get_latest_oplog_chunk_ts some-name ++ local cluster=some-name +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json ++++ mktemp +++ local LAST_OUT=/tmp/tmp.XwIzBCEzQw ++++ mktemp +++ local LAST_ERR=/tmp/tmp.PZTIgnVrjB +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.XwIzBCEzQw +++ cat /tmp/tmp.PZTIgnVrjB +++ rm /tmp/tmp.XwIzBCEzQw /tmp/tmp.PZTIgnVrjB +++ return 0 ++ echo 1747666031 + last_chunk=1747666031 + retries=5 ++ format_date 1747666031 ++ local timestamp=1747666031 +++ TZ=UTC +++ /usr/bin/date -d@1747666031 '+%Y-%m-%d %H:%M:%S' ++ echo 2025-05-19 14:47:11 ++ format_date 1747666441 ++ local timestamp=1747666441 +++ TZ=UTC +++ /usr/bin/date -d@1747666441 '+%Y-%m-%d %H:%M:%S' ++ echo 2025-05-19 14:54:01 + log 'Waiting for last oplog chunk (2025-05-19 14:47:11) to be greater than last write (2025-05-19 14:54:01)' + set +o xtrace [2025-05-19T14:55:14+0000] Waiting for last oplog chunk (2025-05-19 14:47:11) to be greater than last write (2025-05-19 14:54:01) + sleep 10 + [[ 1747666031 -gt 1747666441 ]] + [[ 5 -gt 30 ]] ++ get_latest_oplog_chunk_ts some-name ++ local cluster=some-name +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' +++ kubectl_bin exec 
some-name-rs0-0 -c backup-agent -- pbm status -o json ++++ mktemp +++ local LAST_OUT=/tmp/tmp.J8ZR3HqyWB ++++ mktemp +++ local LAST_ERR=/tmp/tmp.nF1S1ZPlUO +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.J8ZR3HqyWB +++ cat /tmp/tmp.nF1S1ZPlUO +++ rm /tmp/tmp.J8ZR3HqyWB /tmp/tmp.nF1S1ZPlUO +++ return 0 ++ echo 1747666512 + last_chunk=1747666512 + retries=6 ++ format_date 1747666512 ++ local timestamp=1747666512 +++ TZ=UTC +++ /usr/bin/date -d@1747666512 '+%Y-%m-%d %H:%M:%S' ++ echo 2025-05-19 14:55:12 ++ format_date 1747666441 ++ local timestamp=1747666441 +++ TZ=UTC +++ /usr/bin/date -d@1747666441 '+%Y-%m-%d %H:%M:%S' ++ echo 2025-05-19 14:54:01 + log 'Waiting for last oplog chunk (2025-05-19 14:55:12) to be greater than last write (2025-05-19 14:54:01)' + set +o xtrace [2025-05-19T14:55:27+0000] Waiting for last oplog chunk (2025-05-19 14:55:12) to be greater than last write (2025-05-19 14:54:01) + sleep 10 + [[ 1747666512 -gt 1747666441 ]] +++ get_latest_oplog_chunk_ts some-name +++ local cluster=some-name ++++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.1chnDVNMq2 +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.l815wWXXdC ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ set +e ++++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.1chnDVNMq2 ++++ cat /tmp/tmp.l815wWXXdC ++++ rm /tmp/tmp.1chnDVNMq2 /tmp/tmp.l815wWXXdC ++++ return 0 +++ echo 1747666512 ++ format_date 1747666512 ++ local timestamp=1747666512 +++ TZ=UTC +++ /usr/bin/date -d@1747666512 '+%Y-%m-%d %H:%M:%S' ++ echo 2025-05-19 14:55:12 + local 'target_time=2025-05-19 14:55:12' + log 'dropping test collection' + set +o xtrace [2025-05-19T14:55:41+0000] dropping test collection + run_mongo 'use myApp\n db.test.drop()' myApp:myPass@some-name-rs0.demand-backup-fs-18803 + local 'command=use myApp\n db.test.drop()' + local uri=myApp:myPass@some-name-rs0.demand-backup-fs-18803 + local driver=mongodb+srv + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.l59LEx25oY +++ mktemp ++ local LAST_ERR=/tmp/tmp.tbb7mdWHIx ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.l59LEx25oY ++ cat /tmp/tmp.tbb7mdWHIx ++ rm /tmp/tmp.l59LEx25oY /tmp/tmp.tbb7mdWHIx ++ return 0 + local client_container=psmdb-client-66f577db5f-678sd + local mongo_flag= + [[ myApp:myPass@some-name-rs0.demand-backup-fs-18803 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-66f577db5f-678sd -- bash -c 'printf '\''use myApp\n db.test.drop()\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.demand-backup-fs-18803.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.xCCa47OG64 ++ mktemp + local LAST_ERR=/tmp/tmp.MOqSpPU2HI + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec 
psmdb-client-66f577db5f-678sd -- bash -c 'printf '\''use myApp\n db.test.drop()\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.demand-backup-fs-18803.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.xCCa47OG64 Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-rs0-2.some-name-rs0.demand-backup-fs-18803.svc.cluster.local:27017,some-name-rs0-0.some-name-rs0.demand-backup-fs-18803.svc.cluster.local:27017,some-name-rs0-1.some-name-rs0.demand-backup-fs-18803.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("3d100bed-3cab-4ac4-8689-40efe4aab1f2") } Percona Server for MongoDB server version: v8.0.8-3 WARNING: shell and server versions do not match switched to db myApp true bye + cat /tmp/tmp.MOqSpPU2HI + rm /tmp/tmp.xCCa47OG64 /tmp/tmp.MOqSpPU2HI + return 0 + log 'checking pitr... backup: backup-nfs-physical-pitr target: 2025-05-19 14:55:12' + set +o xtrace [2025-05-19T14:55:45+0000] checking pitr... backup: backup-nfs-physical-pitr target: 2025-05-19 14:55:12 + kubectl_bin apply -f - + yq eval '.metadata.name = "restore-backup-nfs-physical-pitr"' + yq eval '.spec.backupName = "backup-nfs-physical-pitr"' + yq eval '.spec.pitr.date = "2025-05-19 14:55:12"' + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1897/e2e-tests/demand-backup-fs/conf/pitr.yml ++ mktemp + local LAST_OUT=/tmp/tmp.U0kCaXSp2n ++ mktemp + local LAST_ERR=/tmp/tmp.RsDYbKiWsj + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.U0kCaXSp2n perconaservermongodbrestore.psmdb.percona.com/restore-backup-nfs-physical-pitr created + cat /tmp/tmp.RsDYbKiWsj + rm /tmp/tmp.U0kCaXSp2n /tmp/tmp.RsDYbKiWsj + return 0 + wait_restore backup-nfs-physical-pitr some-name + local backup_name=backup-nfs-physical-pitr + local cluster_name=some-name + local target_state=ready + local wait_cluster_consistency=1 + local wait_time=1780 + local ok_if_ready=0 + set +o xtrace waiting psmdb-restore/restore-backup-nfs-physical-pitr to reach ready state.........................................................................................................................................OK + '[' 1 -eq 1 ']' + wait_cluster_consistency some-name + local cluster_name=some-name + local wait_time=32 + retry=0 + sleep 7 + echo -n 'waiting for cluster readyness' waiting for cluster readyness++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.VlPvq1EDjK +++ mktemp ++ local LAST_ERR=/tmp/tmp.l0hacNA2Fs ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.VlPvq1EDjK ++ cat /tmp/tmp.l0hacNA2Fs ++ rm /tmp/tmp.VlPvq1EDjK /tmp/tmp.l0hacNA2Fs ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 1 -ge 32 ']' + echo -n . 
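For reference, the oplog-chunk wait traced above reduces to polling PBM until the newest PITR chunk covers the last recorded write. A minimal sketch under the same assumptions as this run (same pod name, pbm and jq available, GNU date); the real helper is get_latest_oplog_chunk_ts from the e2e functions library:

  # poll until the newest PITR chunk end passes the last-write timestamp (1747666441 here)
  last_write=1747666441
  retries=0
  while true; do
    last_chunk=$(kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json \
      | jq '.backups.pitrChunks.pitrChunks | last | .range.end')
    [[ "$last_chunk" -gt "$last_write" ]] && break
    retries=$((retries + 1))
    if [[ "$retries" -gt 30 ]]; then
      echo "PITR chunks never caught up with the last write" >&2
      exit 1
    fi
    echo "waiting: chunk $(TZ=UTC date -d "@$last_chunk" '+%Y-%m-%d %H:%M:%S') <= write $(TZ=UTC date -d "@$last_write" '+%Y-%m-%d %H:%M:%S')"
    sleep 10
  done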
.+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ZwcvcomvLD +++ mktemp ++ local LAST_ERR=/tmp/tmp.NWphfrXaIc ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.ZwcvcomvLD ++ cat /tmp/tmp.NWphfrXaIc ++ rm /tmp/tmp.ZwcvcomvLD /tmp/tmp.NWphfrXaIc ++ return 0 + [[ error == \r\e\a\d\y ]] + let retry+=1 + '[' 2 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.L1orMbdefI +++ mktemp ++ local LAST_ERR=/tmp/tmp.80MWYwoPjt ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.L1orMbdefI ++ cat /tmp/tmp.80MWYwoPjt ++ rm /tmp/tmp.L1orMbdefI /tmp/tmp.80MWYwoPjt ++ return 0 + [[ error == \r\e\a\d\y ]] + let retry+=1 + '[' 3 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.AJMOTyIZex +++ mktemp ++ local LAST_ERR=/tmp/tmp.v2PzOCj95C ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.AJMOTyIZex ++ cat /tmp/tmp.v2PzOCj95C ++ rm /tmp/tmp.AJMOTyIZex /tmp/tmp.v2PzOCj95C ++ return 0 + [[ error == \r\e\a\d\y ]] + let retry+=1 + '[' 4 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.w9fUwsqJxt +++ mktemp ++ local LAST_ERR=/tmp/tmp.reHkACLTvY ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.w9fUwsqJxt ++ cat /tmp/tmp.reHkACLTvY ++ rm /tmp/tmp.w9fUwsqJxt /tmp/tmp.reHkACLTvY ++ return 0 + [[ error == \r\e\a\d\y ]] + let retry+=1 + '[' 5 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.NzJJEkouWC +++ mktemp ++ local LAST_ERR=/tmp/tmp.mFXUqmfySU ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.NzJJEkouWC ++ cat /tmp/tmp.mFXUqmfySU ++ rm /tmp/tmp.NzJJEkouWC /tmp/tmp.mFXUqmfySU ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 6 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.2x1k5EaN1W +++ mktemp ++ local LAST_ERR=/tmp/tmp.mdCn0aOUxj ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.2x1k5EaN1W ++ cat /tmp/tmp.mdCn0aOUxj ++ rm /tmp/tmp.2x1k5EaN1W /tmp/tmp.mdCn0aOUxj ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 7 -ge 32 ']' + echo -n . 
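The restore applied above is built by patching conf/pitr.yml with yq before piping it to kubectl apply. Roughly what reaches the API server is sketched below; only metadata.name, spec.backupName and spec.pitr.date are patched in the trace, so apiVersion, kind, clusterName and pitr.type are assumptions based on the template and the operator's restore resource:

  kubectl apply -f - <<'EOF'
  apiVersion: psmdb.percona.com/v1
  kind: PerconaServerMongoDBRestore
  metadata:
    name: restore-backup-nfs-physical-pitr
  spec:
    clusterName: some-name            # assumed to come from conf/pitr.yml
    backupName: backup-nfs-physical-pitr
    pitr:
      type: date                      # assumed; the test only sets .spec.pitr.date
      date: "2025-05-19 14:55:12"
  EOF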
.+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.IbjSgAKJUk +++ mktemp ++ local LAST_ERR=/tmp/tmp.ZrAvziyLuB ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.IbjSgAKJUk ++ cat /tmp/tmp.ZrAvziyLuB ++ rm /tmp/tmp.IbjSgAKJUk /tmp/tmp.ZrAvziyLuB ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 8 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.TbTIRngIq4 +++ mktemp ++ local LAST_ERR=/tmp/tmp.HiHPhKwKjd ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.TbTIRngIq4 ++ cat /tmp/tmp.HiHPhKwKjd ++ rm /tmp/tmp.TbTIRngIq4 /tmp/tmp.HiHPhKwKjd ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 9 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.jd7J0s1Wrs +++ mktemp ++ local LAST_ERR=/tmp/tmp.JD8cqigAZN ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.jd7J0s1Wrs ++ cat /tmp/tmp.JD8cqigAZN ++ rm /tmp/tmp.jd7J0s1Wrs /tmp/tmp.JD8cqigAZN ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 10 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.I9wAezMJVw +++ mktemp ++ local LAST_ERR=/tmp/tmp.IqnE0jYceh ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.I9wAezMJVw ++ cat /tmp/tmp.IqnE0jYceh ++ rm /tmp/tmp.I9wAezMJVw /tmp/tmp.IqnE0jYceh ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 11 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.u5crqwujCD +++ mktemp ++ local LAST_ERR=/tmp/tmp.vmFmBxuRWv ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.u5crqwujCD ++ cat /tmp/tmp.vmFmBxuRWv ++ rm /tmp/tmp.u5crqwujCD /tmp/tmp.vmFmBxuRWv ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 12 -ge 32 ']' + echo -n . 
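The repeated blocks above are iterations of the wait_cluster_consistency polling loop. Condensed into a standalone sketch using only what the trace shows (jsonpath on .status.state, a 32-retry cap, 10-second sleeps):

  # wait for the psmdb custom resource to report ready, as in the trace above
  retry=0
  until [[ "$(kubectl get psmdb some-name -o jsonpath='{.status.state}')" == "ready" ]]; do
    retry=$((retry + 1))
    if [[ "$retry" -ge 32 ]]; then
      echo "cluster some-name did not reach ready state" >&2
      exit 1
    fi
    echo -n .
    sleep 10
  done
  echo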
.+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Plcx7gurWL +++ mktemp ++ local LAST_ERR=/tmp/tmp.uNfAtSo5yg ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Plcx7gurWL ++ cat /tmp/tmp.uNfAtSo5yg ++ rm /tmp/tmp.Plcx7gurWL /tmp/tmp.uNfAtSo5yg ++ return 0 + [[ ready == \r\e\a\d\y ]] + echo + compare_mongo_cmd find myApp:myPass@some-name-rs0.demand-backup-fs-18803 -5th .svc.cluster.local myApp test + local command=find + local uri=myApp:myPass@some-name-rs0.demand-backup-fs-18803 + local postfix=-5th + local suffix=.svc.cluster.local + local database=myApp + local collection=test + local sort= + local 'full_command=db.test.find()' + [[ ! -z '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-05-19T15:03:27+0000] running db.test.find() in myApp + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0.demand-backup-fs-18803 mongodb .svc.cluster.local + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0.demand-backup-fs-18803 + local driver=mongodb + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ local LAST_OUT=/tmp/tmp.N81XH6eBcm +++ mktemp ++ local LAST_ERR=/tmp/tmp.AOMEdboFKy ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.N81XH6eBcm ++ cat /tmp/tmp.AOMEdboFKy ++ rm /tmp/tmp.N81XH6eBcm /tmp/tmp.AOMEdboFKy ++ return 0 + local client_container=psmdb-client-66f577db5f-678sd + local mongo_flag= + [[ myApp:myPass@some-name-rs0.demand-backup-fs-18803 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-66f577db5f-678sd -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0.demand-backup-fs-18803.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.ImuV12Sk4K ++ mktemp + local LAST_ERR=/tmp/tmp.LnB9AFUVDJ + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-66f577db5f-678sd -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0.demand-backup-fs-18803.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.ImuV12Sk4K + cat /tmp/tmp.LnB9AFUVDJ + rm /tmp/tmp.ImuV12Sk4K /tmp/tmp.LnB9AFUVDJ + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1897/e2e-tests/demand-backup-fs/compare/find-5th.json /tmp/tmp.NzbDqpY8QV/find-5th + destroy demand-backup-fs-18803 + local namespace=demand-backup-fs-18803 + local ignore_logs=true + desc 'destroy cluster/operator and all other resources' + set +o xtrace ----------------------------------------------------------------------------------- destroy cluster/operator and all other resources 
----------------------------------------------------------------------------------- + '[' true == false ']' + delete_crd + desc 'get and delete old CRDs and RBAC' + set +o xtrace ----------------------------------------------------------------------------------- get and delete old CRDs and RBAC ----------------------------------------------------------------------------------- + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1897/deploy/crd.yaml --ignore-not-found --wait=false ++ mktemp + local LAST_OUT=/tmp/tmp.UyqdgwOeh3 ++ mktemp + local LAST_ERR=/tmp/tmp.mruZMbwnni + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1897/deploy/crd.yaml --ignore-not-found --wait=false + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.UyqdgwOeh3 customresourcedefinition.apiextensions.k8s.io "perconaservermongodbbackups.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbrestores.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbs.psmdb.percona.com" deleted + cat /tmp/tmp.mruZMbwnni + rm /tmp/tmp.UyqdgwOeh3 /tmp/tmp.mruZMbwnni + return 0 ++ yq eval .metadata.name /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1897/deploy/crd.yaml ++ grep -v '\-\-\-' + for crd_name in '$(yq eval '\''.metadata.name'\'' "${src_dir}/deploy/crd.yaml" | grep -v '\''\-\-\-'\'')' + kubectl get perconaservermongodbbackups.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbbackups.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' + kubectl patch perconaservermongodbbackups.psmdb.percona.com -n demand-backup-fs-18803 backup-nfs-logical-pitr --type=merge -p '{"metadata":{"finalizers":[]}}' perconaservermongodbbackup.psmdb.percona.com/backup-nfs-logical-pitr patched + kubectl patch perconaservermongodbbackups.psmdb.percona.com -n demand-backup-fs-18803 backup-nfs-physical --type=merge -p '{"metadata":{"finalizers":[]}}' Error from server (NotFound): perconaservermongodbbackups.psmdb.percona.com "backup-nfs-physical" not found + : + kubectl_bin wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.18W0KGGLMq ++ mktemp + local LAST_ERR=/tmp/tmp.KaCG52eU1D + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.18W0KGGLMq + cat /tmp/tmp.KaCG52eU1D + rm /tmp/tmp.18W0KGGLMq /tmp/tmp.KaCG52eU1D + return 0 + for crd_name in '$(yq eval '\''.metadata.name'\'' "${src_dir}/deploy/crd.yaml" | grep -v '\''\-\-\-'\'')' + grep -v NAMESPACE + kubectl get perconaservermongodbrestores.psmdb.percona.com --all-namespaces -o wide + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbrestores.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbrestores" + kubectl patch perconaservermongodbrestores.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbrestores" + : + kubectl_bin wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.WQka03XC04 ++ mktemp + local 
LAST_ERR=/tmp/tmp.iuFPo3vgKC + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.WQka03XC04 + cat /tmp/tmp.iuFPo3vgKC + rm /tmp/tmp.WQka03XC04 /tmp/tmp.iuFPo3vgKC + return 0 + for crd_name in '$(yq eval '\''.metadata.name'\'' "${src_dir}/deploy/crd.yaml" | grep -v '\''\-\-\-'\'')' + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbs.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' + kubectl get perconaservermongodbs.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + kubectl patch perconaservermongodbs.psmdb.percona.com -n demand-backup-fs-18803 some-name --type=merge -p '{"metadata":{"finalizers":[]}}' perconaservermongodb.psmdb.percona.com/some-name patched + kubectl_bin wait --for=delete crd perconaservermongodbs.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.UugsqkbeO2 ++ mktemp + local LAST_ERR=/tmp/tmp.IKFpnv1kt9 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=delete crd perconaservermongodbs.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.UugsqkbeO2 customresourcedefinition.apiextensions.k8s.io/perconaservermongodbs.psmdb.percona.com condition met + cat /tmp/tmp.IKFpnv1kt9 + rm /tmp/tmp.UugsqkbeO2 /tmp/tmp.IKFpnv1kt9 + return 0 + local rbac_yaml=rbac.yaml + '[' -n psmdb-operator ']' + rbac_yaml=cw-rbac.yaml + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1897/deploy/cw-rbac.yaml --ignore-not-found ++ mktemp + local LAST_OUT=/tmp/tmp.ihmI0CRDe1 ++ mktemp + local LAST_ERR=/tmp/tmp.uyKcEcat4l + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1897/deploy/cw-rbac.yaml --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.ihmI0CRDe1 clusterrole.rbac.authorization.k8s.io "percona-server-mongodb-operator" deleted clusterrolebinding.rbac.authorization.k8s.io "service-account-percona-server-mongodb-operator" deleted + cat /tmp/tmp.uyKcEcat4l + rm /tmp/tmp.ihmI0CRDe1 /tmp/tmp.uyKcEcat4l + return 0 + destroy_cert_manager + kubectl_bin delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.qU03s7f8hY ++ mktemp + local LAST_ERR=/tmp/tmp.hmkeab7hXG + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.qU03s7f8hY + cat /tmp/tmp.hmkeab7hXG Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": namespaces "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificaterequests.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificates.cert-manager.io" not found Error from server (NotFound): error when deleting 
"https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "challenges.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "clusterissuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "issuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "orders.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": serviceaccounts "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": serviceaccounts "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": serviceaccounts "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cluster-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-view" not found Error from server (NotFound): error when deleting 
"https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-edit" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting 
"https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": services "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": services "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": services "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": deployments.apps "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": deployments.apps "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": deployments.apps "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": mutatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": validatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found + sleep 0 + for i in '$(seq 0 2)' + set +e + kubectl delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.qU03s7f8hY + cat /tmp/tmp.hmkeab7hXG Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": namespaces "cert-manager" not found Error from server 
(NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificaterequests.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificates.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "challenges.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "clusterissuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "issuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "orders.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": serviceaccounts "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": serviceaccounts "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": serviceaccounts "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): 
error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cluster-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-edit" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting 
"https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": services "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": services "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": services "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": deployments.apps "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": deployments.apps "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": deployments.apps "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": mutatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": validatingwebhookconfigurations.admissionregistration.k8s.io 
"cert-manager-webhook" not found + sleep 4 + for i in '$(seq 0 2)' + set +e + kubectl delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.qU03s7f8hY + cat /tmp/tmp.hmkeab7hXG Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": namespaces "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificaterequests.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificates.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "challenges.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "clusterissuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "issuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "orders.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": serviceaccounts "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": serviceaccounts "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": serviceaccounts "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error 
from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cluster-view" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-view" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-edit" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-cainjector" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-orders" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager:leaderelection" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager:leaderelection" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cert-manager-tokenrequest" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": services "cert-manager-cainjector" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": services "cert-manager" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": services "cert-manager-webhook" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": deployments.apps "cert-manager-cainjector" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": deployments.apps "cert-manager" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": deployments.apps "cert-manager-webhook" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": mutatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": validatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found
+ sleep 8
+ cat /tmp/tmp.qU03s7f8hY
+ cat /tmp/tmp.hmkeab7hXG
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": namespaces "cert-manager" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificaterequests.cert-manager.io" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificates.cert-manager.io" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "challenges.acme.cert-manager.io" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "clusterissuers.cert-manager.io" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "issuers.cert-manager.io" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "orders.acme.cert-manager.io" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": serviceaccounts "cert-manager-cainjector" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": serviceaccounts "cert-manager" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": serviceaccounts "cert-manager-webhook" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cainjector" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-orders" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cluster-view" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-view" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-edit" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-cainjector" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-orders" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager:leaderelection" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager:leaderelection" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cert-manager-tokenrequest" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": services "cert-manager-cainjector" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": services "cert-manager" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": services "cert-manager-webhook" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": deployments.apps "cert-manager-cainjector" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": deployments.apps "cert-manager" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": deployments.apps "cert-manager-webhook" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": mutatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": validatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found
+ rm /tmp/tmp.qU03s7f8hY /tmp/tmp.hmkeab7hXG
+ return 1
+ true
+ '[' -n '' ']'
+ '[' -n psmdb-operator ']'
+ kubectl_bin delete --grace-period=0 --force=true namespace demand-backup-fs-18803
+ rm -rf /tmp/tmp.NzbDqpY8QV
+ kubectl_bin delete --grace-period=0 --force=true namespace psmdb-operator
++ mktemp
++ mktemp
+ desc 'test passed'
+ set +o xtrace
-----------------------------------------------------------------------------------
test passed
+ local LAST_OUT=/tmp/tmp.pgRwTPRlIM
-----------------------------------------------------------------------------------
+ local LAST_OUT=/tmp/tmp.xrWgWGnT5u
++ mktemp
++ mktemp
+ local LAST_ERR=/tmp/tmp.Nsw481uNJt
+ local exit_status=0
+ local timeout=4
+ local LAST_ERR=/tmp/tmp.Yr7ybmbVfP
+ local exit_status=0
+ local timeout=4
++ seq 0 2
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl delete --grace-period=0 --force=true namespace demand-backup-fs-18803
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl delete --grace-period=0 --force=true namespace psmdb-operator