Log: /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1560/e2e-tests/logs/pitr.log WARNING: version difference between client (1.30) and server (1.26) exceeds the supported minor version skew of +/-1 WARNING: version difference between client (1.30) and server (1.26) exceeds the supported minor version skew of +/-1 WARNING: version difference between client (1.30) and server (1.26) exceeds the supported minor version skew of +/-1 + main + create_infra pitr-11162 + local ns=pitr-11162 + delete_crd + desc 'get and delete old CRDs and RBAC' + set +o xtrace ----------------------------------------------------------------------------------- get and delete old CRDs and RBAC ----------------------------------------------------------------------------------- + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1560/deploy/crd.yaml --ignore-not-found --wait=false ++ mktemp + local LAST_OUT=/tmp/tmp.oepuZ4ml04 ++ mktemp + local LAST_ERR=/tmp/tmp.horytNsg4t + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1560/deploy/crd.yaml --ignore-not-found --wait=false + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.oepuZ4ml04 customresourcedefinition.apiextensions.k8s.io "perconaservermongodbbackups.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbrestores.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbs.psmdb.percona.com" deleted + cat /tmp/tmp.horytNsg4t + rm /tmp/tmp.oepuZ4ml04 /tmp/tmp.horytNsg4t + return 0 ++ yq eval .metadata.name /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1560/deploy/crd.yaml ++ grep -v '\-\-\-' + for crd_name in '$(yq eval '\''.metadata.name'\'' "${src_dir}/deploy/crd.yaml" | grep -v '\''\-\-\-'\'')' + grep -v NAMESPACE + kubectl get perconaservermongodbbackups.psmdb.percona.com --all-namespaces -o wide + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbbackups.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' E0604 15:39:59.419983 17455 memcache.go:287] couldn't get resource list for metrics.k8s.io/v1beta1: the server is currently unable to handle the request E0604 15:39:59.762502 17455 memcache.go:121] couldn't get resource list for metrics.k8s.io/v1beta1: the server is currently unable to handle the request E0604 15:39:59.948464 17455 memcache.go:121] couldn't get resource list for metrics.k8s.io/v1beta1: the server is currently unable to handle the request E0604 15:40:00.061663 17455 memcache.go:121] couldn't get resource list for metrics.k8s.io/v1beta1: the server is currently unable to handle the request + kubectl patch perconaservermongodbbackups.psmdb.percona.com -n pitr-27544 backup-minio-0 --type=merge -p '{"metadata":{"finalizers":[]}}' E0604 15:40:01.806903 17797 memcache.go:287] couldn't get resource list for metrics.k8s.io/v1beta1: the server is currently unable to handle the request E0604 15:40:02.083893 17797 memcache.go:121] couldn't get resource list for metrics.k8s.io/v1beta1: the server is currently unable to handle the request E0604 15:40:02.198203 17797 memcache.go:121] couldn't get resource list for metrics.k8s.io/v1beta1: the server is currently unable to handle the request E0604 15:40:02.303309 17797 memcache.go:121] couldn't get resource list for metrics.k8s.io/v1beta1: the server is currently unable to handle the request perconaservermongodbbackup.psmdb.percona.com/backup-minio-0 patched + 
kubectl patch perconaservermongodbbackups.psmdb.percona.com -n pitr-27544 backup-minio-1 --type=merge -p '{"metadata":{"finalizers":[]}}' E0604 15:40:03.758854 18143 memcache.go:287] couldn't get resource list for metrics.k8s.io/v1beta1: the server is currently unable to handle the request E0604 15:40:04.076268 18143 memcache.go:121] couldn't get resource list for metrics.k8s.io/v1beta1: the server is currently unable to handle the request E0604 15:40:04.210328 18143 memcache.go:121] couldn't get resource list for metrics.k8s.io/v1beta1: the server is currently unable to handle the request E0604 15:40:04.360345 18143 memcache.go:121] couldn't get resource list for metrics.k8s.io/v1beta1: the server is currently unable to handle the request perconaservermongodbbackup.psmdb.percona.com/backup-minio-1 patched + kubectl patch perconaservermongodbbackups.psmdb.percona.com -n pitr-27544 backup-minio-2 --type=merge -p '{"metadata":{"finalizers":[]}}' E0604 15:40:06.026524 18331 memcache.go:287] couldn't get resource list for metrics.k8s.io/v1beta1: the server is currently unable to handle the request E0604 15:40:06.338769 18331 memcache.go:121] couldn't get resource list for metrics.k8s.io/v1beta1: the server is currently unable to handle the request E0604 15:40:06.448136 18331 memcache.go:121] couldn't get resource list for metrics.k8s.io/v1beta1: the server is currently unable to handle the request E0604 15:40:06.554952 18331 memcache.go:121] couldn't get resource list for metrics.k8s.io/v1beta1: the server is currently unable to handle the request perconaservermongodbbackup.psmdb.percona.com/backup-minio-2 patched + kubectl patch perconaservermongodbbackups.psmdb.percona.com -n pitr-27544 backup-minio-3 --type=merge -p '{"metadata":{"finalizers":[]}}' E0604 15:40:08.144157 18561 memcache.go:287] couldn't get resource list for metrics.k8s.io/v1beta1: the server is currently unable to handle the request E0604 15:40:08.455364 18561 memcache.go:121] couldn't get resource list for metrics.k8s.io/v1beta1: the server is currently unable to handle the request E0604 15:40:08.563623 18561 memcache.go:121] couldn't get resource list for metrics.k8s.io/v1beta1: the server is currently unable to handle the request E0604 15:40:08.674437 18561 memcache.go:121] couldn't get resource list for metrics.k8s.io/v1beta1: the server is currently unable to handle the request perconaservermongodbbackup.psmdb.percona.com/backup-minio-3 patched + kubectl_bin wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.iu96AOVlY5 ++ mktemp + local LAST_ERR=/tmp/tmp.MG9m8B9V63 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.iu96AOVlY5 customresourcedefinition.apiextensions.k8s.io/perconaservermongodbbackups.psmdb.percona.com condition met + cat /tmp/tmp.MG9m8B9V63 + rm /tmp/tmp.iu96AOVlY5 /tmp/tmp.MG9m8B9V63 + return 0 + for crd_name in '$(yq eval '\''.metadata.name'\'' "${src_dir}/deploy/crd.yaml" | grep -v '\''\-\-\-'\'')' + kubectl get perconaservermongodbrestores.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbrestores.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbrestores" + kubectl patch 
perconaservermongodbrestores.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbrestores" + : + kubectl_bin wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.FyMmUKmq63 ++ mktemp + local LAST_ERR=/tmp/tmp.pyuLf712Hl + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.FyMmUKmq63 + cat /tmp/tmp.pyuLf712Hl + rm /tmp/tmp.FyMmUKmq63 /tmp/tmp.pyuLf712Hl + return 0 + for crd_name in '$(yq eval '\''.metadata.name'\'' "${src_dir}/deploy/crd.yaml" | grep -v '\''\-\-\-'\'')' + kubectl get perconaservermongodbs.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbs.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbs" + kubectl patch perconaservermongodbs.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbs" + : + kubectl_bin wait --for=delete crd perconaservermongodbs.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.aa8nbEyhn8 ++ mktemp + local LAST_ERR=/tmp/tmp.Ep0K7O5ALp + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=delete crd perconaservermongodbs.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.aa8nbEyhn8 + cat /tmp/tmp.Ep0K7O5ALp + rm /tmp/tmp.aa8nbEyhn8 /tmp/tmp.Ep0K7O5ALp + return 0 + local rbac_yaml=rbac.yaml + '[' -n psmdb-operator ']' + rbac_yaml=cw-rbac.yaml + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1560/deploy/cw-rbac.yaml --ignore-not-found ++ mktemp + local LAST_OUT=/tmp/tmp.i0mCR4B1zk ++ mktemp + local LAST_ERR=/tmp/tmp.AxWdBsQVGx + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1560/deploy/cw-rbac.yaml --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.i0mCR4B1zk clusterrole.rbac.authorization.k8s.io "percona-server-mongodb-operator" deleted clusterrolebinding.rbac.authorization.k8s.io "service-account-percona-server-mongodb-operator" deleted + cat /tmp/tmp.AxWdBsQVGx + rm /tmp/tmp.i0mCR4B1zk /tmp/tmp.AxWdBsQVGx + return 0 + check_crd_for_deletion PR-1560-9b9360c1 + local git_tag=PR-1560-9b9360c1 ++ /usr/bin/sed ':a;N;$!ba;s/\n/ /g' ++ curl -s https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/PR-1560-9b9360c1/deploy/crd.yaml ++ /usr/bin/sed s/---//g ++ yq eval .metadata.name + for crd_name in '$(curl -s https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/${git_tag}/deploy/crd.yaml | yq eval '\''.metadata.name'\'' | $sed '\''s/---//g'\'' | $sed '\'':a;N;$!ba;s/\n/ /g'\'')' ++ kubectl_bin get crd/null -o 'jsonpath={.status.conditions[-1].type}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.PzjUSatHc8 +++ mktemp ++ local LAST_ERR=/tmp/tmp.RywbD3lDZv ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}' ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.PzjUSatHc8 ++ cat 
/tmp/tmp.RywbD3lDZv Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ sleep 0 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}' ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.PzjUSatHc8 ++ cat /tmp/tmp.RywbD3lDZv Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ sleep 4 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}' ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.PzjUSatHc8 ++ cat /tmp/tmp.RywbD3lDZv Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ sleep 8 ++ cat /tmp/tmp.PzjUSatHc8 ++ cat /tmp/tmp.RywbD3lDZv Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ rm /tmp/tmp.PzjUSatHc8 /tmp/tmp.RywbD3lDZv ++ return 1 + [[ '' == \T\e\r\m\i\n\a\t\i\n\g ]] + '[' -n psmdb-operator ']' + create_namespace psmdb-operator + local namespace=psmdb-operator + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + destroy_chaos_mesh ++ helm list --all-namespaces --filter chaos-mesh ++ tail -n1 ++ awk '-F ' '{print $2}' ++ sed s/NAMESPACE// + local chaos_mesh_ns= + desc 'destroy chaos-mesh' + set +o xtrace ----------------------------------------------------------------------------------- destroy chaos-mesh ----------------------------------------------------------------------------------- + '[' -n '' ']' ++ awk '{print $1}' ++ grep chaos-mesh ++ kubectl get MutatingWebhookConfiguration + timeout 30 kubectl delete MutatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep validate-auth ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl api-resources ++ grep chaos-mesh ++ awk '{print $1}' ++ kubectl get crd ++ grep chaos-mesh.org ++ awk '{print $1}' + timeout 30 kubectl delete crd error: resource(s) were provided, but no name was specified + : ++ awk '{print $1}' ++ grep chaos-mesh ++ kubectl get clusterrolebinding + timeout 30 kubectl delete clusterrolebinding error: resource(s) were provided, but no name was specified + : ++ awk '{print $1}' ++ kubectl get clusterrole ++ grep chaos-mesh + timeout 30 kubectl delete clusterrole error: resource(s) were provided, but no name was specified + : + desc 'cleaned up all old namespaces' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up all old namespaces ----------------------------------------------------------------------------------- + '[' -n '' ']' + desc 'cleaned up old namespaces psmdb-operator' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up old namespaces psmdb-operator ----------------------------------------------------------------------------------- + kubectl_bin delete namespace psmdb-operator --ignore-not-found + xargs kubectl delete ns ++ mktemp + egrep -v '^kube-|^default|Terminating|psmdb-operator|openshift|gke-mcs|^NAME' + kubectl_bin get ns + awk '{print$1}' + local 
LAST_OUT=/tmp/tmp.JsWAJTvCOi ++ mktemp + local LAST_OUT=/tmp/tmp.N4q0ewCdJT ++ mktemp + local LAST_ERR=/tmp/tmp.Lwxh7Tfsiu + local exit_status=0 + local timeout=4 ++ mktemp ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete namespace psmdb-operator --ignore-not-found + local LAST_ERR=/tmp/tmp.UCSJaclIUE + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get ns + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.N4q0ewCdJT + cat /tmp/tmp.UCSJaclIUE + rm /tmp/tmp.N4q0ewCdJT /tmp/tmp.UCSJaclIUE + return 0 namespace "pitr-27544" deleted + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.JsWAJTvCOi namespace "psmdb-operator" deleted + cat /tmp/tmp.Lwxh7Tfsiu + rm /tmp/tmp.JsWAJTvCOi /tmp/tmp.Lwxh7Tfsiu + return 0 + kubectl_bin wait --for=delete namespace psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.TatVlsLQWp ++ mktemp + local LAST_ERR=/tmp/tmp.E1UUOwbvIh + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=delete namespace psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.TatVlsLQWp + cat /tmp/tmp.E1UUOwbvIh + rm /tmp/tmp.TatVlsLQWp /tmp/tmp.E1UUOwbvIh + return 0 + desc 'create namespace psmdb-operator' + set +o xtrace ----------------------------------------------------------------------------------- create namespace psmdb-operator ----------------------------------------------------------------------------------- + kubectl_bin create namespace psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.mTRhly6g4O ++ mktemp + local LAST_ERR=/tmp/tmp.RWmauyDC6e + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl create namespace psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.mTRhly6g4O namespace/psmdb-operator created + cat /tmp/tmp.RWmauyDC6e + rm /tmp/tmp.mTRhly6g4O /tmp/tmp.RWmauyDC6e + return 0 ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.aV6i9wUx1z +++ mktemp ++ local LAST_ERR=/tmp/tmp.JZgEhpcIfs ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.aV6i9wUx1z ++ cat /tmp/tmp.JZgEhpcIfs ++ rm /tmp/tmp.aV6i9wUx1z /tmp/tmp.JZgEhpcIfs ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-1560-9b9360c1-2-cluster3 --namespace=psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.NV5GE4eCWv ++ mktemp + local LAST_ERR=/tmp/tmp.Ho8LClh27Y + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-1560-9b9360c1-2-cluster3 --namespace=psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.NV5GE4eCWv Context "gke_cloud-dev-112233_us-central1-a_jen-psmdb-1560-9b9360c1-2-cluster3" modified. 
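The delete_crd phase traced above has to clear finalizers on any leftover perconaservermongodbbackups objects before the CRD itself can be removed, otherwise the later wait --for=delete would hang. A minimal standalone sketch of that pattern, pulled out of the expanded trace (error handling reduced to the same ": " fallback the test uses when the resource type is already gone):

# Strip finalizers from every backup object cluster-wide, then wait for the CRD to disappear.
# In the xargs-spawned shell, $0 is the NAMESPACE column and $1 the NAME column of each line.
kubectl get perconaservermongodbbackups.psmdb.percona.com --all-namespaces -o wide \
    | grep -v NAMESPACE \
    | xargs -L 1 sh -xc 'kubectl patch perconaservermongodbbackups.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' \
    || true
kubectl wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com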
+ cat /tmp/tmp.Ho8LClh27Y + rm /tmp/tmp.NV5GE4eCWv /tmp/tmp.Ho8LClh27Y + return 0 + deploy_operator + desc 'start PSMDB operator' + set +o xtrace ----------------------------------------------------------------------------------- start PSMDB operator ----------------------------------------------------------------------------------- + local cr_file + '[' -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1560/e2e-tests/pitr/conf/crd.yaml ']' + cr_file=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1560/deploy/crd.yaml + kubectl_bin apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1560/deploy/crd.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.vZKFvRFRsL ++ mktemp + local LAST_ERR=/tmp/tmp.5MtC5dQHZ5 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1560/deploy/crd.yaml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.vZKFvRFRsL customresourcedefinition.apiextensions.k8s.io/perconaservermongodbbackups.psmdb.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaservermongodbrestores.psmdb.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaservermongodbs.psmdb.percona.com serverside-applied + cat /tmp/tmp.5MtC5dQHZ5 + rm /tmp/tmp.vZKFvRFRsL /tmp/tmp.5MtC5dQHZ5 + return 0 + '[' -n psmdb-operator ']' + apply_rbac cw-rbac + local operator_namespace=psmdb-operator + local rbac=cw-rbac + kubectl_bin apply -n psmdb-operator -f - + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1560/deploy/cw-rbac.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.oGq9hZeP6k ++ mktemp + local LAST_ERR=/tmp/tmp.Yn3dlMqvT5 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -n psmdb-operator -f - + sed -e 's^namespace: .*^namespace: psmdb-operator^' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.oGq9hZeP6k clusterrole.rbac.authorization.k8s.io/percona-server-mongodb-operator created serviceaccount/percona-server-mongodb-operator created clusterrolebinding.rbac.authorization.k8s.io/service-account-percona-server-mongodb-operator created + cat /tmp/tmp.Yn3dlMqvT5 + rm /tmp/tmp.oGq9hZeP6k /tmp/tmp.Yn3dlMqvT5 + return 0 + kubectl_bin apply -f - + yq eval ' (.spec.template.spec.containers[].image = "perconalab/percona-server-mongodb-operator:PR-1560-9b9360c1") | ((.. | select(.[] == "DISABLE_TELEMETRY")) |= .value="true") | ((.. 
| select(.[] == "LOG_LEVEL")) |= .value="DEBUG")' /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1560/deploy/cw-operator.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.hZeHTTtfB3 ++ mktemp + local LAST_ERR=/tmp/tmp.jeP1GXoewo + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.hZeHTTtfB3 deployment.apps/percona-server-mongodb-operator created + cat /tmp/tmp.jeP1GXoewo + rm /tmp/tmp.hZeHTTtfB3 /tmp/tmp.jeP1GXoewo + return 0 + sleep 2 ++ get_operator_pod ++ kubectl_bin get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.5ouyEnbIuV +++ mktemp ++ local LAST_ERR=/tmp/tmp.jZDtISAkOV ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.5ouyEnbIuV ++ cat /tmp/tmp.jZDtISAkOV ++ rm /tmp/tmp.5ouyEnbIuV /tmp/tmp.jZDtISAkOV ++ return 0 + wait_pod percona-server-mongodb-operator-5d86c5f854-2h5jh + local pod=percona-server-mongodb-operator-5d86c5f854-2h5jh + set +o xtrace waiting for pod/percona-server-mongodb-operator-5d86c5f854-2h5jh to be ready.OK + create_namespace pitr-11162 + local namespace=pitr-11162 + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + destroy_chaos_mesh ++ helm list --all-namespaces --filter chaos-mesh ++ sed s/NAMESPACE// ++ awk '-F ' '{print $2}' ++ tail -n1 + local chaos_mesh_ns= + desc 'destroy chaos-mesh' + set +o xtrace ----------------------------------------------------------------------------------- destroy chaos-mesh ----------------------------------------------------------------------------------- + '[' -n '' ']' ++ grep chaos-mesh ++ kubectl get MutatingWebhookConfiguration ++ awk '{print $1}' + timeout 30 kubectl delete MutatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ grep chaos-mesh ++ kubectl get ValidatingWebhookConfiguration ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep validate-auth ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ grep chaos-mesh ++ awk '{print $1}' ++ kubectl api-resources ++ kubectl get crd ++ grep chaos-mesh.org ++ awk '{print $1}' + timeout 30 kubectl delete crd error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrolebinding ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrolebinding error: resource(s) were provided, but no name was specified + : ++ grep chaos-mesh ++ kubectl get clusterrole ++ awk '{print $1}' + timeout 30 kubectl delete clusterrole error: resource(s) were provided, but no name was specified + : + desc 'cleaned up all old namespaces' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up all old namespaces ----------------------------------------------------------------------------------- + '[' -n '' ']' + desc 'cleaned up old namespaces pitr-11162' + xargs kubectl delete ns + set +o xtrace 
----------------------------------------------------------------------------------- cleaned up old namespaces pitr-11162 ----------------------------------------------------------------------------------- + kubectl_bin delete namespace pitr-11162 --ignore-not-found ++ mktemp + awk '{print$1}' + local LAST_OUT=/tmp/tmp.qm1YbOCp1C + kubectl_bin get ns ++ mktemp + egrep -v '^kube-|^default|Terminating|psmdb-operator|openshift|gke-mcs|^NAME' + local LAST_OUT=/tmp/tmp.pKxFUoTbsn ++ mktemp + local LAST_ERR=/tmp/tmp.MrI0bC3PZb + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete namespace pitr-11162 --ignore-not-found ++ mktemp + local LAST_ERR=/tmp/tmp.w7nKk4tXFx + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get ns + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.qm1YbOCp1C + cat /tmp/tmp.MrI0bC3PZb + rm /tmp/tmp.qm1YbOCp1C /tmp/tmp.MrI0bC3PZb + return 0 + kubectl_bin wait --for=delete namespace pitr-11162 ++ mktemp + local LAST_OUT=/tmp/tmp.LxL3gF648k ++ mktemp + local LAST_ERR=/tmp/tmp.YKpGgcbALA + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=delete namespace pitr-11162 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.pKxFUoTbsn + cat /tmp/tmp.w7nKk4tXFx + rm /tmp/tmp.pKxFUoTbsn /tmp/tmp.w7nKk4tXFx + return 0 error: resource(s) were provided, but no name was specified + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.LxL3gF648k + cat /tmp/tmp.YKpGgcbALA + rm /tmp/tmp.LxL3gF648k /tmp/tmp.YKpGgcbALA + return 0 + desc 'create namespace pitr-11162' + set +o xtrace ----------------------------------------------------------------------------------- create namespace pitr-11162 ----------------------------------------------------------------------------------- + kubectl_bin create namespace pitr-11162 ++ mktemp + local LAST_OUT=/tmp/tmp.jxDuj2HKL5 ++ mktemp + local LAST_ERR=/tmp/tmp.QDPQosJLpX + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl create namespace pitr-11162 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.jxDuj2HKL5 namespace/pitr-11162 created + cat /tmp/tmp.QDPQosJLpX + rm /tmp/tmp.jxDuj2HKL5 /tmp/tmp.QDPQosJLpX + return 0 ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.LK51ha1q5R +++ mktemp ++ local LAST_ERR=/tmp/tmp.rqnDUmaz8B ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.LK51ha1q5R ++ cat /tmp/tmp.rqnDUmaz8B ++ rm /tmp/tmp.LK51ha1q5R /tmp/tmp.rqnDUmaz8B ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-1560-9b9360c1-2-cluster3 --namespace=pitr-11162 ++ mktemp + local LAST_OUT=/tmp/tmp.61Vkm3KOoJ ++ mktemp + local LAST_ERR=/tmp/tmp.v4ifWcTnQC + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-1560-9b9360c1-2-cluster3 --namespace=pitr-11162 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.61Vkm3KOoJ Context "gke_cloud-dev-112233_us-central1-a_jen-psmdb-1560-9b9360c1-2-cluster3" modified. 
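Almost every command above runs through the kubectl_bin wrapper, whose expansion accounts for most of this trace: stdout/stderr are buffered in mktemp files, the call is retried up to three times with a 0/4/8-second backoff (the "sleep 0 / sleep 4 / sleep 8" lines), and the buffered output is printed before the temp files are removed. The following is a rough reconstruction from the trace alone; the real helper in e2e-tests/functions may differ in details such as the exact retry condition:

kubectl_bin() {
    local LAST_OUT LAST_ERR exit_status=0 timeout=4 i
    LAST_OUT=$(mktemp)
    LAST_ERR=$(mktemp)
    for i in $(seq 0 2); do
        set +e
        kubectl "$@" 1>"$LAST_OUT" 2>"$LAST_ERR"
        exit_status=$?
        set -e
        if [ "$exit_status" -eq 0 ]; then
            break
        fi
        # failed attempt: show what happened, back off, try again
        cat "$LAST_OUT"
        cat "$LAST_ERR"
        sleep $((timeout * i))
    done
    cat "$LAST_OUT"
    cat "$LAST_ERR"
    rm "$LAST_OUT" "$LAST_ERR"
    return "$exit_status"
}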
+ cat /tmp/tmp.v4ifWcTnQC + rm /tmp/tmp.61Vkm3KOoJ /tmp/tmp.v4ifWcTnQC + return 0 + deploy_minio + desc 'install Minio' + set +o xtrace ----------------------------------------------------------------------------------- install Minio ----------------------------------------------------------------------------------- + helm uninstall minio-service Error: uninstall: Release not loaded: minio-service: release: not found + : + helm repo remove minio "minio" has been removed from your repositories + helm repo add minio https://charts.min.io/ "minio" has been added to your repositories + retry 10 60 helm install minio-service --version 5.0.14 --set replicas=1 --set mode=standalone --set resources.requests.memory=256Mi --set rootUser=rootuser --set rootPassword=rootpass123 --set 'users[0].accessKey=some-access-key' --set 'users[0].secretKey=some-secret-key' --set 'users[0].policy=consoleAdmin' --set service.type=ClusterIP --set configPathmc=/tmp/.minio/ --set persistence.size=2G --set securityContext.enabled=false minio/minio + local max=10 + local delay=60 + shift 2 + local n=1 + helm install minio-service --version 5.0.14 --set replicas=1 --set mode=standalone --set resources.requests.memory=256Mi --set rootUser=rootuser --set rootPassword=rootpass123 --set 'users[0].accessKey=some-access-key' --set 'users[0].secretKey=some-secret-key' --set 'users[0].policy=consoleAdmin' --set service.type=ClusterIP --set configPathmc=/tmp/.minio/ --set persistence.size=2G --set securityContext.enabled=false minio/minio NAME: minio-service LAST DEPLOYED: Tue Jun 4 15:41:33 2024 NAMESPACE: pitr-11162 STATUS: deployed REVISION: 1 TEST SUITE: None NOTES: MinIO can be accessed via port 9000 on the following DNS name from within your cluster: minio-service.pitr-11162.svc.cluster.local To access MinIO from localhost, run the below commands: 1. export POD_NAME=$(kubectl get pods --namespace pitr-11162 -l "release=minio-service" -o jsonpath="{.items[0].metadata.name}") 2. kubectl port-forward $POD_NAME 9000 --namespace pitr-11162 Read more about port forwarding here: http://kubernetes.io/docs/user-guide/kubectl/kubectl_port-forward/ You can now access MinIO server on http://localhost:9000. Follow the below steps to connect to MinIO server with mc client: 1. Download the MinIO mc client - https://min.io/docs/minio/linux/reference/minio-mc.html#quickstart 2. export MC_HOST_minio-service-local=http://$(kubectl get secret --namespace pitr-11162 minio-service -o jsonpath="{.data.rootUser}" | base64 --decode):$(kubectl get secret --namespace pitr-11162 minio-service -o jsonpath="{.data.rootPassword}" | base64 --decode)@localhost:9000 3. 
mc ls minio-service-local ++ kubectl_bin get pods --selector=release=minio-service -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.5GM0shNmSP +++ mktemp ++ local LAST_ERR=/tmp/tmp.hGAdV2Lxdp ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=release=minio-service -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.5GM0shNmSP ++ cat /tmp/tmp.hGAdV2Lxdp ++ rm /tmp/tmp.5GM0shNmSP /tmp/tmp.hGAdV2Lxdp ++ return 0 + MINIO_POD=minio-service-57dd49b-6zfzb + wait_pod minio-service-57dd49b-6zfzb + local pod=minio-service-57dd49b-6zfzb + set +o xtrace waiting for pod/minio-service-57dd49b-6zfzb to be ready.OK + '[' -n psmdb-operator ']' + kubectl_bin create svc -n psmdb-operator externalname minio-service --external-name=minio-service.pitr-11162.svc.cluster.local --tcp=9000 ++ mktemp + local LAST_OUT=/tmp/tmp.YqTLGNVSDn ++ mktemp + local LAST_ERR=/tmp/tmp.5Fd2dg8U6r + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl create svc -n psmdb-operator externalname minio-service --external-name=minio-service.pitr-11162.svc.cluster.local --tcp=9000 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.YqTLGNVSDn service/minio-service created + cat /tmp/tmp.5Fd2dg8U6r + rm /tmp/tmp.YqTLGNVSDn /tmp/tmp.5Fd2dg8U6r + return 0 + kubectl_bin run -i --rm aws-cli --image=perconalab/awscli --restart=Never -- bash -c 'AWS_ACCESS_KEY_ID=some-access-key AWS_SECRET_ACCESS_KEY=some-secret-key AWS_DEFAULT_REGION=us-east-1 \ /usr/bin/aws --endpoint-url http://minio-service:9000 s3 mb s3://operator-testing' ++ mktemp + local LAST_OUT=/tmp/tmp.sOeM4D7EY1 ++ mktemp + local LAST_ERR=/tmp/tmp.DkoLVNY6e2 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl run -i --rm aws-cli --image=perconalab/awscli --restart=Never -- bash -c 'AWS_ACCESS_KEY_ID=some-access-key AWS_SECRET_ACCESS_KEY=some-secret-key AWS_DEFAULT_REGION=us-east-1 \ /usr/bin/aws --endpoint-url http://minio-service:9000 s3 mb s3://operator-testing' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.sOeM4D7EY1 make_bucket: operator-testing pod "aws-cli" deleted + cat /tmp/tmp.DkoLVNY6e2 If you don't see a command prompt, try pressing enter. 
warning: couldn't attach to pod/aws-cli, falling back to streaming logs: unable to upgrade connection: container aws-cli not found in pod aws-cli_pitr-11162 + rm /tmp/tmp.sOeM4D7EY1 /tmp/tmp.DkoLVNY6e2 + return 0 + desc 'create secrets and start client' + set +o xtrace ----------------------------------------------------------------------------------- create secrets and start client ----------------------------------------------------------------------------------- + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1560/e2e-tests/conf/secrets.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1560/e2e-tests/conf/client.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1560/e2e-tests/conf/minio-secret.yml ++ mktemp + local LAST_OUT=/tmp/tmp.90uKYiukEQ ++ mktemp + local LAST_ERR=/tmp/tmp.IyVoDsi2ZG + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1560/e2e-tests/conf/secrets.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1560/e2e-tests/conf/client.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1560/e2e-tests/conf/minio-secret.yml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.90uKYiukEQ secret/some-users created deployment.apps/psmdb-client created secret/minio-secret created + cat /tmp/tmp.IyVoDsi2ZG + rm /tmp/tmp.90uKYiukEQ /tmp/tmp.IyVoDsi2ZG + return 0 + cluster=some-name-rs0 + desc 'create first PSMDB cluster some-name-rs0' + set +o xtrace ----------------------------------------------------------------------------------- create first PSMDB cluster some-name-rs0 ----------------------------------------------------------------------------------- + apply_cluster /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1560/e2e-tests/pitr/conf/some-name-rs0.yml + '[' -z '' ']' + kubectl_bin apply -f - + cat_config /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1560/e2e-tests/pitr/conf/some-name-rs0.yml ++ mktemp + local LAST_OUT=/tmp/tmp.ts6yySByIj + yq eval '(.spec | select(has("pmm"))).pmm.image = "perconalab/pmm-client:dev-latest"' + yq eval '(.spec | select(has("backup"))).backup.image = "perconalab/percona-server-mongodb-operator:main-backup"' + yq eval '(.spec | select(.image == null)).image = "perconalab/percona-server-mongodb-operator:main-mongod7.0"' + yq eval '(.spec | select(has("initImage"))).initImage = "perconalab/percona-server-mongodb-operator:PR-1560-9b9360c1"' + yq eval '.spec.upgradeOptions.apply="Never"' + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1560/e2e-tests/pitr/conf/some-name-rs0.yml ++ mktemp + local LAST_ERR=/tmp/tmp.yKhLqFCxq7 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.ts6yySByIj perconaservermongodb.psmdb.percona.com/some-name created + cat /tmp/tmp.yKhLqFCxq7 + rm /tmp/tmp.ts6yySByIj /tmp/tmp.yKhLqFCxq7 + return 0 + desc 'check if all 3 Pods started' + set +o xtrace ----------------------------------------------------------------------------------- check if all 3 Pods started ----------------------------------------------------------------------------------- + wait_for_running some-name-rs0 3 + local name=some-name-rs0 + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=some-name ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + [[ 0 -eq 2 ]] + wait_pod some-name-rs0-0 + local 
pod=some-name-rs0-0 + set +o xtrace waiting for pod/some-name-rs0-0 to be ready..................OK + for i in '$(seq 0 $last_pod)' + [[ 1 -eq 2 ]] + wait_pod some-name-rs0-1 + local pod=some-name-rs0-1 + set +o xtrace waiting for pod/some-name-rs0-1 to be ready...........OK + for i in '$(seq 0 $last_pod)' + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.T0qbXASoRl +++ mktemp ++ local LAST_ERR=/tmp/tmp.8W5tCanHD3 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.T0qbXASoRl ++ cat /tmp/tmp.8W5tCanHD3 ++ rm /tmp/tmp.T0qbXASoRl /tmp/tmp.8W5tCanHD3 ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-rs0-2 + local pod=some-name-rs0-2 + set +o xtrace waiting for pod/some-name-rs0-2 to be ready............OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.rX2M1mzs5A +++ mktemp ++ local LAST_ERR=/tmp/tmp.I0HGjmDPGK ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.rX2M1mzs5A ++ cat /tmp/tmp.I0HGjmDPGK ++ rm /tmp/tmp.rX2M1mzs5A /tmp/tmp.I0HGjmDPGK ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness + desc 'check if statefulset created with expected config' + set +o xtrace ----------------------------------------------------------------------------------- check if statefulset created with expected config ----------------------------------------------------------------------------------- + compare_kubectl statefulset/some-name-rs0 + local resource=statefulset/some-name-rs0 + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1560/e2e-tests/pitr/compare/statefulset_some-name-rs0.yml + local new_result=/tmp/tmp.4exwRjQ5eQ/statefulset_some-name-rs0.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1560/e2e-tests/pitr/compare/statefulset_some-name-rs0-oc.yml ']' + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. 
| select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("pitr-11162", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. == "batch/v1beta1")) = "batch/v1" ' - + kubectl_bin get -o yaml statefulset/some-name-rs0 ++ mktemp + local LAST_OUT=/tmp/tmp.UOMKGZrOEf ++ mktemp + local LAST_ERR=/tmp/tmp.9tRVFZjvFq + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get -o yaml statefulset/some-name-rs0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.UOMKGZrOEf + cat /tmp/tmp.9tRVFZjvFq + rm /tmp/tmp.UOMKGZrOEf /tmp/tmp.9tRVFZjvFq + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.4exwRjQ5eQ/statefulset_some-name-rs0.yml + version_gt 1.22 ++ echo '1.26 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.4exwRjQ5eQ/statefulset_some-name-rs0.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.4exwRjQ5eQ/statefulset_some-name-rs0.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1560/e2e-tests/pitr/compare/statefulset_some-name-rs0.yml == */cronjob* ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1560/e2e-tests/pitr/compare/statefulset_some-name-rs0.yml /tmp/tmp.4exwRjQ5eQ/statefulset_some-name-rs0.yml + write_initial_data + desc 'create user myApp' + set +o xtrace ----------------------------------------------------------------------------------- create user myApp ----------------------------------------------------------------------------------- + run_mongo 'db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})' userAdmin:userAdmin123456@some-name-rs0.pitr-11162 + local 'command=db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})' + local uri=userAdmin:userAdmin123456@some-name-rs0.pitr-11162 + local driver=mongodb+srv + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.gKMPdJBamg +++ mktemp ++ local LAST_ERR=/tmp/tmp.HKMEkE2QX6 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.gKMPdJBamg ++ cat /tmp/tmp.HKMEkE2QX6 ++ rm /tmp/tmp.gKMPdJBamg /tmp/tmp.HKMEkE2QX6 ++ return 0 + local client_container=psmdb-client-7469665986-8j874 + local mongo_flag= + [[ userAdmin:userAdmin123456@some-name-rs0.pitr-11162 == *cfg* ]] + replica_set=rs0 + 
kubectl_bin exec psmdb-client-7469665986-8j874 -- bash -c 'printf '\''db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})\n'\'' | mongo mongodb+srv://userAdmin:userAdmin123456@some-name-rs0.pitr-11162.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.db8GZSFgHA ++ mktemp + local LAST_ERR=/tmp/tmp.HPZ9B7ZqSl + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-7469665986-8j874 -- bash -c 'printf '\''db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})\n'\'' | mongo mongodb+srv://userAdmin:userAdmin123456@some-name-rs0.pitr-11162.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.db8GZSFgHA Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-rs0-1.some-name-rs0.pitr-11162.svc.cluster.local:27017,some-name-rs0-2.some-name-rs0.pitr-11162.svc.cluster.local:27017,some-name-rs0-0.some-name-rs0.pitr-11162.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("4bc04e2b-72e4-4ee6-89ef-1432b210d5e2") } Percona Server for MongoDB server version: v7.0.11-6 WARNING: shell and server versions do not match Successfully added user: { "user" : "myApp", "roles" : [ { "db" : "myApp", "role" : "readWrite" } ] } bye + cat /tmp/tmp.HPZ9B7ZqSl + rm /tmp/tmp.db8GZSFgHA /tmp/tmp.HPZ9B7ZqSl + return 0 + sleep 2 + write_document + local cmp_postfix= + desc 'write initial data, read from all' + set +o xtrace ----------------------------------------------------------------------------------- write initial data, read from all ----------------------------------------------------------------------------------- + run_mongo 'use myApp\n db.test.insert({ x: 100500 })' myApp:myPass@some-name-rs0.pitr-11162 + local 'command=use myApp\n db.test.insert({ x: 100500 })' + local uri=myApp:myPass@some-name-rs0.pitr-11162 + local driver=mongodb+srv + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.vAzfzkLGy3 +++ mktemp ++ local LAST_ERR=/tmp/tmp.4wFLx3Sjk5 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.vAzfzkLGy3 ++ cat /tmp/tmp.4wFLx3Sjk5 ++ rm /tmp/tmp.vAzfzkLGy3 /tmp/tmp.4wFLx3Sjk5 ++ return 0 + local client_container=psmdb-client-7469665986-8j874 + local mongo_flag= + [[ myApp:myPass@some-name-rs0.pitr-11162 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-7469665986-8j874 -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.pitr-11162.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.U2blAwwCl5 ++ mktemp + local LAST_ERR=/tmp/tmp.ajdgOemq7h + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-7469665986-8j874 -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.pitr-11162.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.U2blAwwCl5 Percona Server for 
MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-rs0-1.some-name-rs0.pitr-11162.svc.cluster.local:27017,some-name-rs0-2.some-name-rs0.pitr-11162.svc.cluster.local:27017,some-name-rs0-0.some-name-rs0.pitr-11162.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("bb28a4cf-a0de-4679-973a-d1f342464ee6") } Percona Server for MongoDB server version: v7.0.11-6 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.ajdgOemq7h + rm /tmp/tmp.U2blAwwCl5 /tmp/tmp.ajdgOemq7h + return 0 + minikube_sleep + sleep_time=10 + [[ '' == 1 ]] + compare_mongo_cmd find myApp:myPass@some-name-rs0-0.some-name-rs0.pitr-11162 '' + local command=find + local uri=myApp:myPass@some-name-rs0-0.some-name-rs0.pitr-11162 + local postfix= + local suffix= + local database=myApp + local collection=test + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-0.some-name-rs0.pitr-11162 mongodb '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-0.some-name-rs0.pitr-11162 + local driver=mongodb + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.uc3KIhy4zz +++ mktemp ++ local LAST_ERR=/tmp/tmp.8l4UTEBp5Y ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.uc3KIhy4zz ++ cat /tmp/tmp.8l4UTEBp5Y ++ rm /tmp/tmp.uc3KIhy4zz /tmp/tmp.8l4UTEBp5Y ++ return 0 + local client_container=psmdb-client-7469665986-8j874 + local mongo_flag= + [[ myApp:myPass@some-name-rs0-0.some-name-rs0.pitr-11162 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-7469665986-8j874 -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-0.some-name-rs0.pitr-11162.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.JXKXDbtViN ++ mktemp + local LAST_ERR=/tmp/tmp.J0PxwVM621 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-7469665986-8j874 -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-0.some-name-rs0.pitr-11162.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.JXKXDbtViN + cat /tmp/tmp.J0PxwVM621 + rm /tmp/tmp.JXKXDbtViN /tmp/tmp.J0PxwVM621 + return 0 + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1560/e2e-tests/pitr/compare/find.json /tmp/tmp.4exwRjQ5eQ/find + compare_mongo_cmd find myApp:myPass@some-name-rs0-1.some-name-rs0.pitr-11162 '' + local command=find + local uri=myApp:myPass@some-name-rs0-1.some-name-rs0.pitr-11162 + local postfix= + local suffix= + local database=myApp + local collection=test + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-1.some-name-rs0.pitr-11162 mongodb '' + local 'command=use myApp\n db.test.find()' + local 
uri=myApp:myPass@some-name-rs0-1.some-name-rs0.pitr-11162 + local driver=mongodb + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.k4c2LESdF1 +++ mktemp ++ local LAST_ERR=/tmp/tmp.6ei1PAITWY ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.k4c2LESdF1 ++ cat /tmp/tmp.6ei1PAITWY ++ rm /tmp/tmp.k4c2LESdF1 /tmp/tmp.6ei1PAITWY ++ return 0 + local client_container=psmdb-client-7469665986-8j874 + local mongo_flag= + [[ myApp:myPass@some-name-rs0-1.some-name-rs0.pitr-11162 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-7469665986-8j874 -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-1.some-name-rs0.pitr-11162.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.bG6xll2po2 ++ mktemp + local LAST_ERR=/tmp/tmp.cpPmiELYGW + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-7469665986-8j874 -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-1.some-name-rs0.pitr-11162.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.bG6xll2po2 + cat /tmp/tmp.cpPmiELYGW + rm /tmp/tmp.bG6xll2po2 /tmp/tmp.cpPmiELYGW + return 0 + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1560/e2e-tests/pitr/compare/find.json /tmp/tmp.4exwRjQ5eQ/find + compare_mongo_cmd find myApp:myPass@some-name-rs0-2.some-name-rs0.pitr-11162 '' + local command=find + local uri=myApp:myPass@some-name-rs0-2.some-name-rs0.pitr-11162 + local postfix= + local suffix= + local database=myApp + local collection=test + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-2.some-name-rs0.pitr-11162 mongodb '' + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-2.some-name-rs0.pitr-11162 + local driver=mongodb + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ local LAST_OUT=/tmp/tmp.F7RyA9bndR +++ mktemp ++ local LAST_ERR=/tmp/tmp.C7ioSj4Edb ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.F7RyA9bndR ++ cat /tmp/tmp.C7ioSj4Edb ++ rm /tmp/tmp.F7RyA9bndR /tmp/tmp.C7ioSj4Edb ++ return 0 + local client_container=psmdb-client-7469665986-8j874 + local mongo_flag= + [[ myApp:myPass@some-name-rs0-2.some-name-rs0.pitr-11162 == *cfg* ]] + replica_set=rs0 + kubectl_bin 
exec psmdb-client-7469665986-8j874 -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-2.some-name-rs0.pitr-11162.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.z0Sq9nyQXN ++ mktemp + local LAST_ERR=/tmp/tmp.MLdWyF2t2Y + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-7469665986-8j874 -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-2.some-name-rs0.pitr-11162.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.z0Sq9nyQXN + cat /tmp/tmp.MLdWyF2t2Y + rm /tmp/tmp.z0Sq9nyQXN /tmp/tmp.MLdWyF2t2Y + return 0 + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1560/e2e-tests/pitr/compare/find.json /tmp/tmp.4exwRjQ5eQ/find + wait_backup_agent some-name-rs0-0 + local agent_pod=some-name-rs0-0 + set +o xtrace some-name-rs0-0 + wait_backup_agent some-name-rs0-1 + local agent_pod=some-name-rs0-1 + set +o xtrace some-name-rs0-1 + wait_backup_agent some-name-rs0-2 + local agent_pod=some-name-rs0-2 + set +o xtrace some-name-rs0-2 + backup_name_minio=backup-minio + run_backup backup-minio 0 + local name=backup-minio + local idx=0 + desc 'run backup backup-minio-0' + set +o xtrace ----------------------------------------------------------------------------------- run backup backup-minio-0 ----------------------------------------------------------------------------------- + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1560/e2e-tests/pitr/conf/backup-minio.yml + /usr/bin/sed -e 's/name:/name: backup-minio-0/' + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.9tVOINKz7G ++ mktemp + local LAST_ERR=/tmp/tmp.6OFNCtB0wc + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.9tVOINKz7G perconaservermongodbbackup.psmdb.percona.com/backup-minio-0 created + cat /tmp/tmp.6OFNCtB0wc + rm /tmp/tmp.9tVOINKz7G /tmp/tmp.6OFNCtB0wc + return 0 + wait_backup backup-minio-0 + local backup_name=backup-minio-0 + set +o xtrace backup-minio-0................ 
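run_backup above simply stamps the index into the name field of pitr/conf/backup-minio.yml and applies it; wait_backup then polls (the trailing dots) until the backup object reports completion. Its internals are hidden by set +o xtrace, so the sketch below is an approximation: the jsonpath and the "ready" value are assumptions about the PerconaServerMongoDBBackup status, and test_dir stands in for the test's conf directory.

# Approximate shape of the run_backup / wait_backup pair exercised above.
run_backup() {
    local name=$1 idx=$2
    cat "${test_dir}/conf/backup-minio.yml" \
        | sed -e "s/name:/name: ${name}-${idx}/" \
        | kubectl apply -f -
}

wait_backup() {
    local backup_name=$1
    # assumed: .status.state flips to "ready" once the backup agent finishes the snapshot
    until [ "$(kubectl get perconaservermongodbbackups.psmdb.percona.com "$backup_name" -o jsonpath='{.status.state}' 2>/dev/null)" = "ready" ]; do
        printf '.'
        sleep 5
    done
    echo OK
}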
+ write_document -2nd + local cmp_postfix=-2nd + desc 'write initial data, read from all' + set +o xtrace ----------------------------------------------------------------------------------- write initial data, read from all ----------------------------------------------------------------------------------- + run_mongo 'use myApp\n db.test.insert({ x: 100500 })' myApp:myPass@some-name-rs0.pitr-11162 + local 'command=use myApp\n db.test.insert({ x: 100500 })' + local uri=myApp:myPass@some-name-rs0.pitr-11162 + local driver=mongodb+srv + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.WU4xXBIHBu +++ mktemp ++ local LAST_ERR=/tmp/tmp.xmDsBsvTm6 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.WU4xXBIHBu ++ cat /tmp/tmp.xmDsBsvTm6 ++ rm /tmp/tmp.WU4xXBIHBu /tmp/tmp.xmDsBsvTm6 ++ return 0 + local client_container=psmdb-client-7469665986-8j874 + local mongo_flag= + [[ myApp:myPass@some-name-rs0.pitr-11162 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-7469665986-8j874 -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.pitr-11162.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.BpDQmalrqb ++ mktemp + local LAST_ERR=/tmp/tmp.IRRNKURzHJ + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-7469665986-8j874 -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.pitr-11162.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.BpDQmalrqb Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-rs0-1.some-name-rs0.pitr-11162.svc.cluster.local:27017,some-name-rs0-2.some-name-rs0.pitr-11162.svc.cluster.local:27017,some-name-rs0-0.some-name-rs0.pitr-11162.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("460733be-ff94-4a29-a879-02f6f475a29e") } Percona Server for MongoDB server version: v7.0.11-6 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.IRRNKURzHJ + rm /tmp/tmp.BpDQmalrqb /tmp/tmp.IRRNKURzHJ + return 0 + minikube_sleep + sleep_time=10 + [[ '' == 1 ]] + compare_mongo_cmd find myApp:myPass@some-name-rs0-0.some-name-rs0.pitr-11162 -2nd + local command=find + local uri=myApp:myPass@some-name-rs0-0.some-name-rs0.pitr-11162 + local postfix=-2nd + local suffix= + local database=myApp + local collection=test + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-0.some-name-rs0.pitr-11162 mongodb '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-0.some-name-rs0.pitr-11162 + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + local driver=mongodb + local suffix=.svc.cluster.local + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods 
--selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.6jMUbasxMY +++ mktemp ++ local LAST_ERR=/tmp/tmp.OahV5kdkaR ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.6jMUbasxMY ++ cat /tmp/tmp.OahV5kdkaR ++ rm /tmp/tmp.6jMUbasxMY /tmp/tmp.OahV5kdkaR ++ return 0 + local client_container=psmdb-client-7469665986-8j874 + local mongo_flag= + [[ myApp:myPass@some-name-rs0-0.some-name-rs0.pitr-11162 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-7469665986-8j874 -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-0.some-name-rs0.pitr-11162.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.sh0GwEt9jd ++ mktemp + local LAST_ERR=/tmp/tmp.a8abdQzBwP + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-7469665986-8j874 -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-0.some-name-rs0.pitr-11162.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.sh0GwEt9jd + cat /tmp/tmp.a8abdQzBwP + rm /tmp/tmp.sh0GwEt9jd /tmp/tmp.a8abdQzBwP + return 0 + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1560/e2e-tests/pitr/compare/find-2nd.json /tmp/tmp.4exwRjQ5eQ/find-2nd + compare_mongo_cmd find myApp:myPass@some-name-rs0-1.some-name-rs0.pitr-11162 -2nd + local command=find + local uri=myApp:myPass@some-name-rs0-1.some-name-rs0.pitr-11162 + local postfix=-2nd + local suffix= + local database=myApp + local collection=test + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-1.some-name-rs0.pitr-11162 mongodb '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-1.some-name-rs0.pitr-11162 + local driver=mongodb + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ local LAST_OUT=/tmp/tmp.d4I3KGoqx8 +++ mktemp ++ local LAST_ERR=/tmp/tmp.WbVkpNrZTm ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.d4I3KGoqx8 ++ cat /tmp/tmp.WbVkpNrZTm ++ rm /tmp/tmp.d4I3KGoqx8 /tmp/tmp.WbVkpNrZTm ++ return 0 + local client_container=psmdb-client-7469665986-8j874 + local mongo_flag= + [[ myApp:myPass@some-name-rs0-1.some-name-rs0.pitr-11162 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-7469665986-8j874 -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-1.some-name-rs0.pitr-11162.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.bLwKs255o1 ++ mktemp + local LAST_ERR=/tmp/tmp.sUmtvN2g6d + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec 
psmdb-client-7469665986-8j874 -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-1.some-name-rs0.pitr-11162.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.bLwKs255o1 + cat /tmp/tmp.sUmtvN2g6d + rm /tmp/tmp.bLwKs255o1 /tmp/tmp.sUmtvN2g6d + return 0 + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1560/e2e-tests/pitr/compare/find-2nd.json /tmp/tmp.4exwRjQ5eQ/find-2nd + compare_mongo_cmd find myApp:myPass@some-name-rs0-2.some-name-rs0.pitr-11162 -2nd + local command=find + local uri=myApp:myPass@some-name-rs0-2.some-name-rs0.pitr-11162 + local postfix=-2nd + local suffix= + local database=myApp + local collection=test + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-2.some-name-rs0.pitr-11162 mongodb '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-2.some-name-rs0.pitr-11162 + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + local driver=mongodb + local suffix=.svc.cluster.local + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.L4HgWu0uRo +++ mktemp ++ local LAST_ERR=/tmp/tmp.xtKymzHpZn ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.L4HgWu0uRo ++ cat /tmp/tmp.xtKymzHpZn ++ rm /tmp/tmp.L4HgWu0uRo /tmp/tmp.xtKymzHpZn ++ return 0 + local client_container=psmdb-client-7469665986-8j874 + local mongo_flag= + [[ myApp:myPass@some-name-rs0-2.some-name-rs0.pitr-11162 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-7469665986-8j874 -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-2.some-name-rs0.pitr-11162.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.emJA3O0ce6 ++ mktemp + local LAST_ERR=/tmp/tmp.tpFNkrCaP1 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-7469665986-8j874 -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-2.some-name-rs0.pitr-11162.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.emJA3O0ce6 + cat /tmp/tmp.tpFNkrCaP1 + rm /tmp/tmp.emJA3O0ce6 /tmp/tmp.tpFNkrCaP1 + return 0 + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1560/e2e-tests/pitr/compare/find-2nd.json /tmp/tmp.4exwRjQ5eQ/find-2nd + sleep 2 ++ run_mongo 'new Date().toISOString()' myApp:myPass@some-name-rs0.pitr-11162 mongodb '' --quiet ++ local 'command=new Date().toISOString()' ++ grep -E -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match' ++ local uri=myApp:myPass@some-name-rs0.pitr-11162 ++ local driver=mongodb ++ local suffix=.svc.cluster.local ++ cut -c1-19 ++ tr T ' ' +++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.GWIwRfP0aZ 
++++ mktemp +++ local LAST_ERR=/tmp/tmp.RplwFTvvQ3 +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.GWIwRfP0aZ +++ cat /tmp/tmp.RplwFTvvQ3 +++ rm /tmp/tmp.GWIwRfP0aZ /tmp/tmp.RplwFTvvQ3 +++ return 0 ++ local client_container=psmdb-client-7469665986-8j874 ++ local mongo_flag=--quiet ++ [[ myApp:myPass@some-name-rs0.pitr-11162 == *cfg* ]] ++ replica_set=rs0 ++ kubectl_bin exec psmdb-client-7469665986-8j874 -- bash -c 'printf '\''new Date().toISOString()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0.pitr-11162.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 --quiet' +++ mktemp ++ local LAST_OUT=/tmp/tmp.nmO7dl3ehG +++ mktemp ++ local LAST_ERR=/tmp/tmp.aPdrJwqagB ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl exec psmdb-client-7469665986-8j874 -- bash -c 'printf '\''new Date().toISOString()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0.pitr-11162.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 --quiet' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.nmO7dl3ehG ++ cat /tmp/tmp.aPdrJwqagB ++ rm /tmp/tmp.nmO7dl3ehG /tmp/tmp.aPdrJwqagB ++ return 0 + time_now='2024-06-04 15:45:35' + check_recovery backup-minio-0 date '2024-06-04 15:45:35' -2nd '' some-name + local backup_name=backup-minio-0 + local restore_type=date + local 'restore_date=2024-06-04 15:45:35' + local cmp_postfix=-2nd + local backupSource= + local cluster_name=some-name + desc 'write more data before restore by date' + set +o xtrace ----------------------------------------------------------------------------------- write more data before restore by date ----------------------------------------------------------------------------------- + sleep 60 + run_mongo 'use myApp\n db.test.insert({ x: 100501 })' myApp:myPass@some-name-rs0.pitr-11162 + local 'command=use myApp\n db.test.insert({ x: 100501 })' + local uri=myApp:myPass@some-name-rs0.pitr-11162 + local driver=mongodb+srv + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.XnS7z8QL70 +++ mktemp ++ local LAST_ERR=/tmp/tmp.nynCK9s9lV ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.XnS7z8QL70 ++ cat /tmp/tmp.nynCK9s9lV ++ rm /tmp/tmp.XnS7z8QL70 /tmp/tmp.nynCK9s9lV ++ return 0 + local client_container=psmdb-client-7469665986-8j874 + local mongo_flag= + [[ myApp:myPass@some-name-rs0.pitr-11162 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-7469665986-8j874 -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100501 })\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.pitr-11162.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.98apP6iN1Z ++ mktemp + local LAST_ERR=/tmp/tmp.h9VT37nUHh + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-7469665986-8j874 -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100501 })\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.pitr-11162.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + 
exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.98apP6iN1Z Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-rs0-1.some-name-rs0.pitr-11162.svc.cluster.local:27017,some-name-rs0-2.some-name-rs0.pitr-11162.svc.cluster.local:27017,some-name-rs0-0.some-name-rs0.pitr-11162.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("be863f08-5290-4691-9097-73a5a942e264") } Percona Server for MongoDB server version: v7.0.11-6 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.h9VT37nUHh + rm /tmp/tmp.98apP6iN1Z /tmp/tmp.h9VT37nUHh + return 0 + desc 'waiting for chunks to be uploaded' + set +o xtrace ----------------------------------------------------------------------------------- waiting for chunks to be uploaded ----------------------------------------------------------------------------------- + sleep 120 + desc 'check restore by date' + set +o xtrace ----------------------------------------------------------------------------------- check restore by date ----------------------------------------------------------------------------------- + '[' -z '' ']' + /usr/bin/sed -e 's/type:/type: date/' + /usr/bin/sed -e 's/backupName:/backupName: backup-minio-0/' + '[' -z '2024-06-04 15:45:35' ']' + /usr/bin/sed -e /backupSource/,+2d + kubectl_bin apply -f - + /usr/bin/sed -e 's/date:/date: 2024-06-04 15:45:35/' ++ mktemp + local LAST_OUT=/tmp/tmp.IedOvq8oQU ++ mktemp + local LAST_ERR=/tmp/tmp.Bt2hrRsUvD + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1560/e2e-tests/pitr/conf/restore.yml + /usr/bin/sed -e 's/name:/name: restore-backup-minio-0/' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.IedOvq8oQU perconaservermongodbrestore.psmdb.percona.com/restore-backup-minio-0 created + cat /tmp/tmp.Bt2hrRsUvD + rm /tmp/tmp.IedOvq8oQU /tmp/tmp.Bt2hrRsUvD + return 0 + wait_restore backup-minio-0 some-name + local backup_name=backup-minio-0 + local cluster_name=some-name + local target_state=ready + local wait_cluster_consistency=1 + local wait_time=780 + set +o xtrace waiting psmdb-restore/backup-minio-0 to reach ready state........... 
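The check_recovery call traced above (restore type date, against backup-minio-0) reduces to roughly the following sketch. The sed edits, the restore.yml template path, the psmdb jsonpath, and the 7-second sleep are all verbatim from the trace; the retry cap of 32 is the wait_time value shown above, and treating it as a plain iteration limit is an assumption.

  restore_date='2024-06-04 15:45:35'   # time_now captured via "new Date().toISOString()" earlier in the trace
  cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1560/e2e-tests/pitr/conf/restore.yml \
    | /usr/bin/sed -e 's/name:/name: restore-backup-minio-0/' \
                   -e 's/backupName:/backupName: backup-minio-0/' \
                   -e 's/type:/type: date/' \
                   -e "s/date:/date: ${restore_date}/" \
                   -e '/backupSource/,+2d' \
    | kubectl apply -f -
  # after the psmdb-restore reaches ready, wait_cluster_consistency polls the cluster state
  retry=0
  until [ "$(kubectl get psmdb some-name -o jsonpath='{.status.state}')" = "ready" ]; do
    retry=$((retry + 1))
    if [ "${retry}" -ge 32 ]; then echo 'cluster did not become ready in time' >&2; break; fi
    echo -n .
    sleep 7
  done
  echo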
+ '[' 1 -eq 1 ']' + wait_cluster_consistency some-name + local cluster_name=some-name + local wait_time=32 + retry=0 + sleep 7 + echo -n 'waiting for cluster readyness' waiting for cluster readyness++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.nIWkafrwfD +++ mktemp ++ local LAST_ERR=/tmp/tmp.1UxekcuJNp ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.nIWkafrwfD ++ cat /tmp/tmp.1UxekcuJNp ++ rm /tmp/tmp.nIWkafrwfD /tmp/tmp.1UxekcuJNp ++ return 0 + [[ ready == \r\e\a\d\y ]] + echo + set -o xtrace + compare_mongo_cmd find myApp:myPass@some-name-rs0-0.some-name-rs0.pitr-11162 -2nd + local command=find + local uri=myApp:myPass@some-name-rs0-0.some-name-rs0.pitr-11162 + local postfix=-2nd + local suffix= + local database=myApp + local collection=test + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-0.some-name-rs0.pitr-11162 mongodb '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-0.some-name-rs0.pitr-11162 + local driver=mongodb + local suffix=.svc.cluster.local + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.wUHmGKubO2 +++ mktemp ++ local LAST_ERR=/tmp/tmp.pphxvPkeOR ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.wUHmGKubO2 ++ cat /tmp/tmp.pphxvPkeOR ++ rm /tmp/tmp.wUHmGKubO2 /tmp/tmp.pphxvPkeOR ++ return 0 + local client_container=psmdb-client-7469665986-8j874 + local mongo_flag= + [[ myApp:myPass@some-name-rs0-0.some-name-rs0.pitr-11162 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-7469665986-8j874 -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-0.some-name-rs0.pitr-11162.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.MpsFhvN7Dx ++ mktemp + local LAST_ERR=/tmp/tmp.ELAM7j3gEQ + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-7469665986-8j874 -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-0.some-name-rs0.pitr-11162.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.MpsFhvN7Dx + cat /tmp/tmp.ELAM7j3gEQ + rm /tmp/tmp.MpsFhvN7Dx /tmp/tmp.ELAM7j3gEQ + return 0 + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1560/e2e-tests/pitr/compare/find-2nd.json /tmp/tmp.4exwRjQ5eQ/find-2nd + compare_mongo_cmd find myApp:myPass@some-name-rs0-1.some-name-rs0.pitr-11162 -2nd + local command=find + local uri=myApp:myPass@some-name-rs0-1.some-name-rs0.pitr-11162 + local postfix=-2nd + local suffix= + local database=myApp + local collection=test + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable 
to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-1.some-name-rs0.pitr-11162 mongodb '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-1.some-name-rs0.pitr-11162 + local driver=mongodb + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Oyjl6lDdkT +++ mktemp ++ local LAST_ERR=/tmp/tmp.EJYY368jdc ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Oyjl6lDdkT ++ cat /tmp/tmp.EJYY368jdc ++ rm /tmp/tmp.Oyjl6lDdkT /tmp/tmp.EJYY368jdc ++ return 0 + local client_container=psmdb-client-7469665986-8j874 + local mongo_flag= + [[ myApp:myPass@some-name-rs0-1.some-name-rs0.pitr-11162 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-7469665986-8j874 -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-1.some-name-rs0.pitr-11162.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.ZwAXuF5z4Y ++ mktemp + local LAST_ERR=/tmp/tmp.0AEG6CcyJr + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-7469665986-8j874 -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-1.some-name-rs0.pitr-11162.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.ZwAXuF5z4Y + cat /tmp/tmp.0AEG6CcyJr + rm /tmp/tmp.ZwAXuF5z4Y /tmp/tmp.0AEG6CcyJr + return 0 + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1560/e2e-tests/pitr/compare/find-2nd.json /tmp/tmp.4exwRjQ5eQ/find-2nd + compare_mongo_cmd find myApp:myPass@some-name-rs0-2.some-name-rs0.pitr-11162 -2nd + local command=find + local uri=myApp:myPass@some-name-rs0-2.some-name-rs0.pitr-11162 + local postfix=-2nd + local suffix= + local database=myApp + local collection=test + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-2.some-name-rs0.pitr-11162 mongodb '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-2.some-name-rs0.pitr-11162 + local driver=mongodb + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.GSHf4qAJW7 +++ mktemp ++ local LAST_ERR=/tmp/tmp.JkvCtFYkpm ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.GSHf4qAJW7 ++ cat /tmp/tmp.JkvCtFYkpm ++ rm /tmp/tmp.GSHf4qAJW7 /tmp/tmp.JkvCtFYkpm ++ return 0 + local client_container=psmdb-client-7469665986-8j874 + local mongo_flag= + [[ 
myApp:myPass@some-name-rs0-2.some-name-rs0.pitr-11162 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-7469665986-8j874 -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-2.some-name-rs0.pitr-11162.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.EfunKPArtg ++ mktemp + local LAST_ERR=/tmp/tmp.73PFQSaIbi + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-7469665986-8j874 -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-2.some-name-rs0.pitr-11162.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.EfunKPArtg + cat /tmp/tmp.73PFQSaIbi + rm /tmp/tmp.EfunKPArtg /tmp/tmp.73PFQSaIbi + return 0 + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1560/e2e-tests/pitr/compare/find-2nd.json /tmp/tmp.4exwRjQ5eQ/find-2nd + run_backup backup-minio 1 + local name=backup-minio + local idx=1 + desc 'run backup backup-minio-1' + set +o xtrace ----------------------------------------------------------------------------------- run backup backup-minio-1 ----------------------------------------------------------------------------------- + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1560/e2e-tests/pitr/conf/backup-minio.yml + kubectl_bin apply -f - ++ mktemp + /usr/bin/sed -e 's/name:/name: backup-minio-1/' + local LAST_OUT=/tmp/tmp.gRslulu8wc ++ mktemp + local LAST_ERR=/tmp/tmp.hGfJvQLF2b + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.gRslulu8wc perconaservermongodbbackup.psmdb.percona.com/backup-minio-1 created + cat /tmp/tmp.hGfJvQLF2b + rm /tmp/tmp.gRslulu8wc /tmp/tmp.hGfJvQLF2b + return 0 + wait_backup backup-minio-1 + local backup_name=backup-minio-1 + set +o xtrace backup-minio-1............... 
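The compare_latest_restorable_time step traced next cross-checks PBM's own view of the oplog chunks against what the operator wrote into the backup object. The real helper polls pbm until two consecutive reads return the same timestamp; the one-shot read below is a simplification, but the exec command, jq filter, date format, and jsonpath are verbatim from the trace.

  # newest PITR chunk end according to pbm, read inside the backup-agent container
  ts=$(kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json \
       | jq '.backups.pitrChunks.pitrChunks | last | .range.end')
  latest_restorable_time=$(/usr/bin/date -u -d "@${ts}" +%Y-%m-%dT%H:%M:%SZ)
  # latestRestorableTime as recorded by the operator on the psmdb-backup object
  backup_time=$(kubectl get psmdb-backup backup-minio-1 -o jsonpath='{.status.latestRestorableTime}')
  [ "${backup_time}" = "${latest_restorable_time}" ] \
    || echo "mismatch: ${backup_time} != ${latest_restorable_time}" >&2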
+ compare_latest_restorable_time some-name-rs0 backup-minio-1 + local cluster=some-name-rs0 + local backup_name=backup-minio-1 + local latest_restorable_time + local backup_time ++ get_latest_restorable_time some-name-rs0 ++ local cluster=some-name-rs0 ++ local first_timestamp ++ local second_timestamp ++ local retry=0 ++ [[ '' != '' ]] +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.rHgq62H1QM ++++ mktemp +++ local LAST_ERR=/tmp/tmp.GsmWAjfIRd +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.rHgq62H1QM +++ cat /tmp/tmp.GsmWAjfIRd +++ rm /tmp/tmp.rHgq62H1QM /tmp/tmp.GsmWAjfIRd +++ return 0 ++ first_timestamp=1717516123 ++ sleep 5 ++ [[ 1717516123 != '' ]] ++ [[ 1717516123 != \n\u\l\l ]] +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json ++++ mktemp +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' +++ local LAST_OUT=/tmp/tmp.7lGMhx1woe ++++ mktemp +++ local LAST_ERR=/tmp/tmp.oV0cur7sYs +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.7lGMhx1woe +++ cat /tmp/tmp.oV0cur7sYs +++ rm /tmp/tmp.7lGMhx1woe /tmp/tmp.oV0cur7sYs +++ return 0 ++ second_timestamp=1717516123 ++ let retry+=1 ++ [[ 1 -gt 30 ]] ++ [[ 1717516123 != '' ]] ++ [[ 1717516123 != \n\u\l\l ]] ++ [[ 1717516123 == 1717516123 ]] ++ /usr/bin/date -u -d @1717516123 +%Y-%m-%dT%H:%M:%SZ + latest_restorable_time=2024-06-04T15:48:43Z ++ get_latest_restorable_time_from_backup_object backup-minio-1 ++ local backup_name=backup-minio-1 ++ local latestRestorableTime ++ local retry=0 ++ [[ '' != '' ]] ++ sleep 5 +++ kubectl_bin get psmdb-backup backup-minio-1 -o 'jsonpath={.status.latestRestorableTime}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.s9MMCB8H0D ++++ mktemp +++ local LAST_ERR=/tmp/tmp.i7zoFwsMhx +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get psmdb-backup backup-minio-1 -o 'jsonpath={.status.latestRestorableTime}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.s9MMCB8H0D +++ cat /tmp/tmp.i7zoFwsMhx +++ rm /tmp/tmp.s9MMCB8H0D /tmp/tmp.i7zoFwsMhx +++ return 0 ++ latestRestorableTime=2024-06-04T15:48:43Z ++ let retry+=1 ++ [[ 1 -gt 30 ]] ++ [[ 2024-06-04T15:48:43Z != '' ]] ++ [[ 2024-06-04T15:48:43Z != \n\u\l\l ]] ++ echo 2024-06-04T15:48:43Z + backup_time=2024-06-04T15:48:43Z + [[ 2024-06-04T15:48:43Z != \2\0\2\4\-\0\6\-\0\4\T\1\5\:\4\8\:\4\3\Z ]] + check_recovery backup-minio-1 latest '' -3rd '' some-name + local backup_name=backup-minio-1 + local restore_type=latest + local restore_date= + local cmp_postfix=-3rd + local backupSource= + local cluster_name=some-name + desc 'write more data before restore by latest' + set +o xtrace ----------------------------------------------------------------------------------- write more data before restore by latest ----------------------------------------------------------------------------------- + sleep 60 + run_mongo 'use myApp\n db.test.insert({ x: 100501 })' myApp:myPass@some-name-rs0.pitr-11162 + local 'command=use myApp\n db.test.insert({ x: 
100501 })' + local uri=myApp:myPass@some-name-rs0.pitr-11162 + local driver=mongodb+srv + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.OjbFlMTgc9 +++ mktemp ++ local LAST_ERR=/tmp/tmp.AA4AxxVjaJ ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.OjbFlMTgc9 ++ cat /tmp/tmp.AA4AxxVjaJ ++ rm /tmp/tmp.OjbFlMTgc9 /tmp/tmp.AA4AxxVjaJ ++ return 0 + local client_container=psmdb-client-7469665986-8j874 + local mongo_flag= + [[ myApp:myPass@some-name-rs0.pitr-11162 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-7469665986-8j874 -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100501 })\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.pitr-11162.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.EiMxsJ359x ++ mktemp + local LAST_ERR=/tmp/tmp.ZMkfM8uZRW + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-7469665986-8j874 -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100501 })\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.pitr-11162.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.EiMxsJ359x Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-rs0-0.some-name-rs0.pitr-11162.svc.cluster.local:27017,some-name-rs0-1.some-name-rs0.pitr-11162.svc.cluster.local:27017,some-name-rs0-2.some-name-rs0.pitr-11162.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("597bbef2-eff6-4c01-abb2-5245645e7da4") } Percona Server for MongoDB server version: v7.0.11-6 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.ZMkfM8uZRW + rm /tmp/tmp.EiMxsJ359x /tmp/tmp.ZMkfM8uZRW + return 0 + desc 'waiting for chunks to be uploaded' + set +o xtrace ----------------------------------------------------------------------------------- waiting for chunks to be uploaded ----------------------------------------------------------------------------------- + sleep 120 + desc 'check restore by latest' + set +o xtrace ----------------------------------------------------------------------------------- check restore by latest ----------------------------------------------------------------------------------- + '[' -z '' ']' + /usr/bin/sed -e 's/name:/name: restore-backup-minio-1/' + /usr/bin/sed -e 's/backupName:/backupName: backup-minio-1/' + /usr/bin/sed -e 's/type:/type: latest/' + '[' -z '' ']' + /usr/bin/sed -e /backupSource/,+2d + /usr/bin/sed -e /date:/d + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1560/e2e-tests/pitr/conf/restore.yml + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.0uDBhUcndw ++ mktemp + local LAST_ERR=/tmp/tmp.E5NVosbGYj + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.0uDBhUcndw perconaservermongodbrestore.psmdb.percona.com/restore-backup-minio-1 created + cat /tmp/tmp.E5NVosbGYj + rm /tmp/tmp.0uDBhUcndw /tmp/tmp.E5NVosbGYj + return 0 + 
wait_restore backup-minio-1 some-name + local backup_name=backup-minio-1 + local cluster_name=some-name + local target_state=ready + local wait_cluster_consistency=1 + local wait_time=780 + set +o xtrace waiting psmdb-restore/backup-minio-1 to reach ready state........... + '[' 1 -eq 1 ']' + wait_cluster_consistency some-name + local cluster_name=some-name + local wait_time=32 + retry=0 + sleep 7 + echo -n 'waiting for cluster readyness' waiting for cluster readyness++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.OxLh4UiFsk +++ mktemp ++ local LAST_ERR=/tmp/tmp.ij4W1So4yh ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.OxLh4UiFsk ++ cat /tmp/tmp.ij4W1So4yh ++ rm /tmp/tmp.OxLh4UiFsk /tmp/tmp.ij4W1So4yh ++ return 0 + [[ ready == \r\e\a\d\y ]] + echo + set -o xtrace + compare_mongo_cmd find myApp:myPass@some-name-rs0-0.some-name-rs0.pitr-11162 -3rd + local command=find + local uri=myApp:myPass@some-name-rs0-0.some-name-rs0.pitr-11162 + local postfix=-3rd + local suffix= + local database=myApp + local collection=test + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-0.some-name-rs0.pitr-11162 mongodb '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-0.some-name-rs0.pitr-11162 + local driver=mongodb + local suffix=.svc.cluster.local + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.tKvEG395Up +++ mktemp ++ local LAST_ERR=/tmp/tmp.R9ebx2FrJT ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.tKvEG395Up ++ cat /tmp/tmp.R9ebx2FrJT ++ rm /tmp/tmp.tKvEG395Up /tmp/tmp.R9ebx2FrJT ++ return 0 + local client_container=psmdb-client-7469665986-8j874 + local mongo_flag= + [[ myApp:myPass@some-name-rs0-0.some-name-rs0.pitr-11162 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-7469665986-8j874 -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-0.some-name-rs0.pitr-11162.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.0NKHpCYOHN ++ mktemp + local LAST_ERR=/tmp/tmp.qT6neJDrET + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-7469665986-8j874 -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-0.some-name-rs0.pitr-11162.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.0NKHpCYOHN + cat /tmp/tmp.qT6neJDrET + rm /tmp/tmp.0NKHpCYOHN /tmp/tmp.qT6neJDrET + return 0 + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1560/e2e-tests/pitr/compare/find-3rd.json /tmp/tmp.4exwRjQ5eQ/find-3rd + compare_mongo_cmd find myApp:myPass@some-name-rs0-1.some-name-rs0.pitr-11162 -3rd + local 
command=find + local uri=myApp:myPass@some-name-rs0-1.some-name-rs0.pitr-11162 + local postfix=-3rd + local suffix= + local database=myApp + local collection=test + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-1.some-name-rs0.pitr-11162 mongodb '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-1.some-name-rs0.pitr-11162 + local driver=mongodb + local suffix=.svc.cluster.local + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.829sWrrHxP +++ mktemp ++ local LAST_ERR=/tmp/tmp.ompLhRJRTH ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.829sWrrHxP ++ cat /tmp/tmp.ompLhRJRTH ++ rm /tmp/tmp.829sWrrHxP /tmp/tmp.ompLhRJRTH ++ return 0 + local client_container=psmdb-client-7469665986-8j874 + local mongo_flag= + [[ myApp:myPass@some-name-rs0-1.some-name-rs0.pitr-11162 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-7469665986-8j874 -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-1.some-name-rs0.pitr-11162.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.zz4q4y1Vh7 ++ mktemp + local LAST_ERR=/tmp/tmp.CzM8LPy4uf + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-7469665986-8j874 -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-1.some-name-rs0.pitr-11162.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.zz4q4y1Vh7 + cat /tmp/tmp.CzM8LPy4uf + rm /tmp/tmp.zz4q4y1Vh7 /tmp/tmp.CzM8LPy4uf + return 0 + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1560/e2e-tests/pitr/compare/find-3rd.json /tmp/tmp.4exwRjQ5eQ/find-3rd + compare_mongo_cmd find myApp:myPass@some-name-rs0-2.some-name-rs0.pitr-11162 -3rd + local command=find + local uri=myApp:myPass@some-name-rs0-2.some-name-rs0.pitr-11162 + local postfix=-3rd + local suffix= + local database=myApp + local collection=test + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-2.some-name-rs0.pitr-11162 mongodb '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-2.some-name-rs0.pitr-11162 + local driver=mongodb + local suffix=.svc.cluster.local + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.KkqXJ82r8p +++ mktemp ++ local LAST_ERR=/tmp/tmp.NehOLnNaOt ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ 
set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.KkqXJ82r8p ++ cat /tmp/tmp.NehOLnNaOt ++ rm /tmp/tmp.KkqXJ82r8p /tmp/tmp.NehOLnNaOt ++ return 0 + local client_container=psmdb-client-7469665986-8j874 + local mongo_flag= + [[ myApp:myPass@some-name-rs0-2.some-name-rs0.pitr-11162 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-7469665986-8j874 -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-2.some-name-rs0.pitr-11162.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.fIwHouuZdi ++ mktemp + local LAST_ERR=/tmp/tmp.EhqvJifNZA + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-7469665986-8j874 -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-2.some-name-rs0.pitr-11162.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.fIwHouuZdi + cat /tmp/tmp.EhqvJifNZA + rm /tmp/tmp.fIwHouuZdi /tmp/tmp.EhqvJifNZA + return 0 + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1560/e2e-tests/pitr/compare/find-3rd.json /tmp/tmp.4exwRjQ5eQ/find-3rd + desc 'restore pitr using backupSource' + set +o xtrace ----------------------------------------------------------------------------------- restore pitr using backupSource ----------------------------------------------------------------------------------- + reset_collection + desc 'reset data' + set +o xtrace ----------------------------------------------------------------------------------- reset data ----------------------------------------------------------------------------------- + run_mongo 'use myApp\n db.test.remove({})' myApp:myPass@some-name-rs0.pitr-11162 + local 'command=use myApp\n db.test.remove({})' + local uri=myApp:myPass@some-name-rs0.pitr-11162 + local driver=mongodb+srv + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.dVXaJ3kN4Q +++ mktemp ++ local LAST_ERR=/tmp/tmp.JtP7nx7ztG ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.dVXaJ3kN4Q ++ cat /tmp/tmp.JtP7nx7ztG ++ rm /tmp/tmp.dVXaJ3kN4Q /tmp/tmp.JtP7nx7ztG ++ return 0 + local client_container=psmdb-client-7469665986-8j874 + local mongo_flag= + [[ myApp:myPass@some-name-rs0.pitr-11162 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-7469665986-8j874 -- bash -c 'printf '\''use myApp\n db.test.remove({})\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.pitr-11162.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.KUvI8YvCpz ++ mktemp + local LAST_ERR=/tmp/tmp.7IBEcYqkNa + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-7469665986-8j874 -- bash -c 'printf '\''use myApp\n db.test.remove({})\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.pitr-11162.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.KUvI8YvCpz Percona Server for MongoDB shell version v4.4.29-28 connecting to: 
mongodb://some-name-rs0-1.some-name-rs0.pitr-11162.svc.cluster.local:27017,some-name-rs0-2.some-name-rs0.pitr-11162.svc.cluster.local:27017,some-name-rs0-0.some-name-rs0.pitr-11162.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("61914344-c3eb-4e1d-ad49-58f97b0d9a2e") } Percona Server for MongoDB server version: v7.0.11-6 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nRemoved" : 3 }) bye + cat /tmp/tmp.7IBEcYqkNa + rm /tmp/tmp.KUvI8YvCpz /tmp/tmp.7IBEcYqkNa + return 0 + sleep 2 + write_document + local cmp_postfix= + desc 'write initial data, read from all' + set +o xtrace ----------------------------------------------------------------------------------- write initial data, read from all ----------------------------------------------------------------------------------- + run_mongo 'use myApp\n db.test.insert({ x: 100500 })' myApp:myPass@some-name-rs0.pitr-11162 + local 'command=use myApp\n db.test.insert({ x: 100500 })' + local uri=myApp:myPass@some-name-rs0.pitr-11162 + local driver=mongodb+srv + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.nH065cHFGb +++ mktemp ++ local LAST_ERR=/tmp/tmp.WSA4M9Ax3g ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.nH065cHFGb ++ cat /tmp/tmp.WSA4M9Ax3g ++ rm /tmp/tmp.nH065cHFGb /tmp/tmp.WSA4M9Ax3g ++ return 0 + local client_container=psmdb-client-7469665986-8j874 + local mongo_flag= + [[ myApp:myPass@some-name-rs0.pitr-11162 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-7469665986-8j874 -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.pitr-11162.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.CHyXQ2vGgs ++ mktemp + local LAST_ERR=/tmp/tmp.Cfxv90X8Hb + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-7469665986-8j874 -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.pitr-11162.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.CHyXQ2vGgs Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-rs0-0.some-name-rs0.pitr-11162.svc.cluster.local:27017,some-name-rs0-1.some-name-rs0.pitr-11162.svc.cluster.local:27017,some-name-rs0-2.some-name-rs0.pitr-11162.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("b3f3a72e-6729-4192-8b95-75f3d1bd98ab") } Percona Server for MongoDB server version: v7.0.11-6 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.Cfxv90X8Hb + rm /tmp/tmp.CHyXQ2vGgs /tmp/tmp.Cfxv90X8Hb + return 0 + minikube_sleep + sleep_time=10 + [[ '' == 1 ]] + compare_mongo_cmd find myApp:myPass@some-name-rs0-0.some-name-rs0.pitr-11162 '' + local command=find + local uri=myApp:myPass@some-name-rs0-0.some-name-rs0.pitr-11162 + local postfix= + local suffix= + local database=myApp + local 
collection=test + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-0.some-name-rs0.pitr-11162 mongodb '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-0.some-name-rs0.pitr-11162 + local driver=mongodb + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.uVGGfzCOcf +++ mktemp ++ local LAST_ERR=/tmp/tmp.uFIruDBdd6 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.uVGGfzCOcf ++ cat /tmp/tmp.uFIruDBdd6 ++ rm /tmp/tmp.uVGGfzCOcf /tmp/tmp.uFIruDBdd6 ++ return 0 + local client_container=psmdb-client-7469665986-8j874 + local mongo_flag= + [[ myApp:myPass@some-name-rs0-0.some-name-rs0.pitr-11162 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-7469665986-8j874 -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-0.some-name-rs0.pitr-11162.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.nJs2ouuVDE ++ mktemp + local LAST_ERR=/tmp/tmp.QiDBt6q11b + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-7469665986-8j874 -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-0.some-name-rs0.pitr-11162.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.nJs2ouuVDE + cat /tmp/tmp.QiDBt6q11b + rm /tmp/tmp.nJs2ouuVDE /tmp/tmp.QiDBt6q11b + return 0 + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1560/e2e-tests/pitr/compare/find.json /tmp/tmp.4exwRjQ5eQ/find + compare_mongo_cmd find myApp:myPass@some-name-rs0-1.some-name-rs0.pitr-11162 '' + local command=find + local uri=myApp:myPass@some-name-rs0-1.some-name-rs0.pitr-11162 + local postfix= + local suffix= + local database=myApp + local collection=test + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-1.some-name-rs0.pitr-11162 mongodb '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-1.some-name-rs0.pitr-11162 + local driver=mongodb + local suffix=.svc.cluster.local + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Ra625uepcv +++ mktemp ++ local LAST_ERR=/tmp/tmp.WYcdop6UvR ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Ra625uepcv ++ cat /tmp/tmp.WYcdop6UvR ++ rm /tmp/tmp.Ra625uepcv /tmp/tmp.WYcdop6UvR ++ return 0 + 
local client_container=psmdb-client-7469665986-8j874 + local mongo_flag= + [[ myApp:myPass@some-name-rs0-1.some-name-rs0.pitr-11162 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-7469665986-8j874 -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-1.some-name-rs0.pitr-11162.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.3rHc8bgV2e ++ mktemp + local LAST_ERR=/tmp/tmp.5a2uqsc06Y + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-7469665986-8j874 -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-1.some-name-rs0.pitr-11162.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.3rHc8bgV2e + cat /tmp/tmp.5a2uqsc06Y + rm /tmp/tmp.3rHc8bgV2e /tmp/tmp.5a2uqsc06Y + return 0 + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1560/e2e-tests/pitr/compare/find.json /tmp/tmp.4exwRjQ5eQ/find + compare_mongo_cmd find myApp:myPass@some-name-rs0-2.some-name-rs0.pitr-11162 '' + local command=find + local uri=myApp:myPass@some-name-rs0-2.some-name-rs0.pitr-11162 + local postfix= + local suffix= + local database=myApp + local collection=test + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-2.some-name-rs0.pitr-11162 mongodb '' + local 'command=use myApp\n db.test.find()' + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + local uri=myApp:myPass@some-name-rs0-2.some-name-rs0.pitr-11162 + local driver=mongodb + local suffix=.svc.cluster.local + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Vs5UFapRKF +++ mktemp ++ local LAST_ERR=/tmp/tmp.PsyaUaYpOA ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Vs5UFapRKF ++ cat /tmp/tmp.PsyaUaYpOA ++ rm /tmp/tmp.Vs5UFapRKF /tmp/tmp.PsyaUaYpOA ++ return 0 + local client_container=psmdb-client-7469665986-8j874 + local mongo_flag= + [[ myApp:myPass@some-name-rs0-2.some-name-rs0.pitr-11162 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-7469665986-8j874 -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-2.some-name-rs0.pitr-11162.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.CTTu3QnEnK ++ mktemp + local LAST_ERR=/tmp/tmp.xtrnjwZMtk + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-7469665986-8j874 -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-2.some-name-rs0.pitr-11162.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.CTTu3QnEnK + cat /tmp/tmp.xtrnjwZMtk + rm /tmp/tmp.CTTu3QnEnK /tmp/tmp.xtrnjwZMtk + return 0 + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1560/e2e-tests/pitr/compare/find.json /tmp/tmp.4exwRjQ5eQ/find + run_backup backup-minio 2 + local 
name=backup-minio + local idx=2 + desc 'run backup backup-minio-2' + set +o xtrace ----------------------------------------------------------------------------------- run backup backup-minio-2 ----------------------------------------------------------------------------------- + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1560/e2e-tests/pitr/conf/backup-minio.yml + /usr/bin/sed -e 's/name:/name: backup-minio-2/' + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.jKAm4iXzzj ++ mktemp + local LAST_ERR=/tmp/tmp.z8S3o6QgT0 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.jKAm4iXzzj perconaservermongodbbackup.psmdb.percona.com/backup-minio-2 created + cat /tmp/tmp.z8S3o6QgT0 + rm /tmp/tmp.jKAm4iXzzj /tmp/tmp.z8S3o6QgT0 + return 0 + wait_backup backup-minio-2 + local backup_name=backup-minio-2 + set +o xtrace backup-minio-2.................. + compare_latest_restorable_time some-name-rs0 backup-minio-2 + local cluster=some-name-rs0 + local backup_name=backup-minio-2 + local latest_restorable_time + local backup_time ++ get_latest_restorable_time some-name-rs0 ++ local cluster=some-name-rs0 ++ local first_timestamp ++ local second_timestamp ++ local retry=0 ++ [[ '' != '' ]] +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.5wQU6ncfdq ++++ mktemp +++ local LAST_ERR=/tmp/tmp.s0MlP8NkND +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.5wQU6ncfdq +++ cat /tmp/tmp.s0MlP8NkND +++ rm /tmp/tmp.5wQU6ncfdq /tmp/tmp.s0MlP8NkND +++ return 0 ++ first_timestamp=1717516403 ++ sleep 5 ++ [[ 1717516403 != '' ]] ++ [[ 1717516403 != \n\u\l\l ]] +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.8hKU2dJy8r ++++ mktemp +++ local LAST_ERR=/tmp/tmp.pGGFHLNWkP +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.8hKU2dJy8r +++ cat /tmp/tmp.pGGFHLNWkP +++ rm /tmp/tmp.8hKU2dJy8r /tmp/tmp.pGGFHLNWkP +++ return 0 ++ second_timestamp=1717516403 ++ let retry+=1 ++ [[ 1 -gt 30 ]] ++ [[ 1717516403 != '' ]] ++ [[ 1717516403 != \n\u\l\l ]] ++ [[ 1717516403 == 1717516403 ]] ++ /usr/bin/date -u -d @1717516403 +%Y-%m-%dT%H:%M:%SZ + latest_restorable_time=2024-06-04T15:53:23Z ++ get_latest_restorable_time_from_backup_object backup-minio-2 ++ local backup_name=backup-minio-2 ++ local latestRestorableTime ++ local retry=0 ++ [[ '' != '' ]] ++ sleep 5 +++ kubectl_bin get psmdb-backup backup-minio-2 -o 'jsonpath={.status.latestRestorableTime}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.FA7qUXwdwE ++++ mktemp +++ local LAST_ERR=/tmp/tmp.1LsFZEm6uT +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get psmdb-backup backup-minio-2 -o 'jsonpath={.status.latestRestorableTime}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.FA7qUXwdwE +++ cat /tmp/tmp.1LsFZEm6uT +++ rm 
/tmp/tmp.FA7qUXwdwE /tmp/tmp.1LsFZEm6uT +++ return 0 ++ latestRestorableTime=2024-06-04T15:53:23Z ++ let retry+=1 ++ [[ 1 -gt 30 ]] ++ [[ 2024-06-04T15:53:23Z != '' ]] ++ [[ 2024-06-04T15:53:23Z != \n\u\l\l ]] ++ echo 2024-06-04T15:53:23Z + backup_time=2024-06-04T15:53:23Z + [[ 2024-06-04T15:53:23Z != \2\0\2\4\-\0\6\-\0\4\T\1\5\:\5\3\:\2\3\Z ]] ++ cut -c1-19 ++ grep -E -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match' ++ tr T ' ' ++ run_mongo 'new Date().toISOString()' myApp:myPass@some-name-rs0.pitr-11162 mongodb '' --quiet ++ local 'command=new Date().toISOString()' ++ local uri=myApp:myPass@some-name-rs0.pitr-11162 ++ local driver=mongodb ++ local suffix=.svc.cluster.local +++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.pYVkWg2Pxi ++++ mktemp +++ local LAST_ERR=/tmp/tmp.PZXbfLp09u +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.pYVkWg2Pxi +++ cat /tmp/tmp.PZXbfLp09u +++ rm /tmp/tmp.pYVkWg2Pxi /tmp/tmp.PZXbfLp09u +++ return 0 ++ local client_container=psmdb-client-7469665986-8j874 ++ local mongo_flag=--quiet ++ [[ myApp:myPass@some-name-rs0.pitr-11162 == *cfg* ]] ++ replica_set=rs0 ++ kubectl_bin exec psmdb-client-7469665986-8j874 -- bash -c 'printf '\''new Date().toISOString()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0.pitr-11162.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 --quiet' +++ mktemp ++ local LAST_OUT=/tmp/tmp.djul1gpw35 +++ mktemp ++ local LAST_ERR=/tmp/tmp.uerN3ukXfL ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl exec psmdb-client-7469665986-8j874 -- bash -c 'printf '\''new Date().toISOString()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0.pitr-11162.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 --quiet' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.djul1gpw35 ++ cat /tmp/tmp.uerN3ukXfL ++ rm /tmp/tmp.djul1gpw35 /tmp/tmp.uerN3ukXfL ++ return 0 + time_now='2024-06-04 15:55:15' + check_recovery backup-minio-2 date '2024-06-04 15:55:15' '' backupSource some-name + local backup_name=backup-minio-2 + local restore_type=date + local 'restore_date=2024-06-04 15:55:15' + local cmp_postfix= + local backupSource=backupSource + local cluster_name=some-name + desc 'write more data before restore by date' + set +o xtrace ----------------------------------------------------------------------------------- write more data before restore by date ----------------------------------------------------------------------------------- + sleep 60 + run_mongo 'use myApp\n db.test.insert({ x: 100501 })' myApp:myPass@some-name-rs0.pitr-11162 + local 'command=use myApp\n db.test.insert({ x: 100501 })' + local uri=myApp:myPass@some-name-rs0.pitr-11162 + local driver=mongodb+srv + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.cIRyj59Vvn +++ mktemp ++ local LAST_ERR=/tmp/tmp.1n1DdTIc23 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ 
exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.cIRyj59Vvn ++ cat /tmp/tmp.1n1DdTIc23 ++ rm /tmp/tmp.cIRyj59Vvn /tmp/tmp.1n1DdTIc23 ++ return 0 + local client_container=psmdb-client-7469665986-8j874 + local mongo_flag= + [[ myApp:myPass@some-name-rs0.pitr-11162 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-7469665986-8j874 -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100501 })\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.pitr-11162.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.PFeDGXJF8I ++ mktemp + local LAST_ERR=/tmp/tmp.0QS2IcVbNY + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-7469665986-8j874 -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100501 })\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.pitr-11162.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.PFeDGXJF8I Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-rs0-1.some-name-rs0.pitr-11162.svc.cluster.local:27017,some-name-rs0-2.some-name-rs0.pitr-11162.svc.cluster.local:27017,some-name-rs0-0.some-name-rs0.pitr-11162.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("4c8ac0ab-4d56-42d7-93c2-b58d6b027dbb") } Percona Server for MongoDB server version: v7.0.11-6 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.0QS2IcVbNY + rm /tmp/tmp.PFeDGXJF8I /tmp/tmp.0QS2IcVbNY + return 0 + desc 'waiting for chunks to be uploaded' + set +o xtrace ----------------------------------------------------------------------------------- waiting for chunks to be uploaded ----------------------------------------------------------------------------------- + sleep 120 + desc 'check restore by date' + set +o xtrace ----------------------------------------------------------------------------------- check restore by date ----------------------------------------------------------------------------------- + '[' -z backupSource ']' ++ get_backup_dest backup-minio-2 ++ local backup_name=backup-minio-2 ++ kubectl_bin get psmdb-backup backup-minio-2 -o 'jsonpath={.status.destination}' ++ sed -e 's/.json$//' ++ sed 's|s3://||' ++ sed 's|azure://||' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Dgd5SWCtpn +++ mktemp ++ local LAST_ERR=/tmp/tmp.DkkzToUjrO ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb-backup backup-minio-2 -o 'jsonpath={.status.destination}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Dgd5SWCtpn ++ cat /tmp/tmp.DkkzToUjrO ++ rm /tmp/tmp.Dgd5SWCtpn /tmp/tmp.DkkzToUjrO ++ return 0 + backup_dest=operator-testing/2024-06-04T15:54:41Z + /usr/bin/sed -e /backupName/d + /usr/bin/sed -e 's/name:/name: restore-backup-minio-2/' + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1560/e2e-tests/pitr/conf/restore.yml + kubectl_bin apply -f - ++ mktemp + /usr/bin/sed -e 's/type:/type: date/' + '[' -z '2024-06-04 15:55:15' ']' + local LAST_OUT=/tmp/tmp.5htSx4AXWK + /usr/bin/sed -e 's/date:/date: 2024-06-04 15:55:15/' ++ mktemp + local LAST_ERR=/tmp/tmp.0DjFq7XX8O + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + /usr/bin/sed -e 
's|BACKUP-NAME|operator-testing/2024-06-04T15:54:41Z|' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.5htSx4AXWK perconaservermongodbrestore.psmdb.percona.com/restore-backup-minio-2 created + cat /tmp/tmp.0DjFq7XX8O + rm /tmp/tmp.5htSx4AXWK /tmp/tmp.0DjFq7XX8O + return 0 + wait_restore backup-minio-2 some-name + local backup_name=backup-minio-2 + local cluster_name=some-name + local target_state=ready + local wait_cluster_consistency=1 + local wait_time=780 + set +o xtrace waiting psmdb-restore/backup-minio-2 to reach ready state........... + '[' 1 -eq 1 ']' + wait_cluster_consistency some-name + local cluster_name=some-name + local wait_time=32 + retry=0 + sleep 7 + echo -n 'waiting for cluster readyness' waiting for cluster readyness++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.y8n7F1CUCR +++ mktemp ++ local LAST_ERR=/tmp/tmp.wPKfclaymz ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.y8n7F1CUCR ++ cat /tmp/tmp.wPKfclaymz ++ rm /tmp/tmp.y8n7F1CUCR /tmp/tmp.wPKfclaymz ++ return 0 + [[ ready == \r\e\a\d\y ]] + echo + set -o xtrace + compare_mongo_cmd find myApp:myPass@some-name-rs0-0.some-name-rs0.pitr-11162 '' + local command=find + local uri=myApp:myPass@some-name-rs0-0.some-name-rs0.pitr-11162 + local postfix= + local suffix= + local database=myApp + local collection=test + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-0.some-name-rs0.pitr-11162 mongodb '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-0.some-name-rs0.pitr-11162 + local driver=mongodb + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' ++ local LAST_OUT=/tmp/tmp.lceXXbDFrp +++ mktemp ++ local LAST_ERR=/tmp/tmp.Eha4gwjHbv ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.lceXXbDFrp ++ cat /tmp/tmp.Eha4gwjHbv ++ rm /tmp/tmp.lceXXbDFrp /tmp/tmp.Eha4gwjHbv ++ return 0 + local client_container=psmdb-client-7469665986-8j874 + local mongo_flag= + [[ myApp:myPass@some-name-rs0-0.some-name-rs0.pitr-11162 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-7469665986-8j874 -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-0.some-name-rs0.pitr-11162.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.qoXX6rgKem ++ mktemp + local LAST_ERR=/tmp/tmp.JDCuEqwZyy + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-7469665986-8j874 -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-0.some-name-rs0.pitr-11162.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.qoXX6rgKem + cat 
/tmp/tmp.JDCuEqwZyy + rm /tmp/tmp.qoXX6rgKem /tmp/tmp.JDCuEqwZyy + return 0 + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1560/e2e-tests/pitr/compare/find.json /tmp/tmp.4exwRjQ5eQ/find + compare_mongo_cmd find myApp:myPass@some-name-rs0-1.some-name-rs0.pitr-11162 '' + local command=find + local uri=myApp:myPass@some-name-rs0-1.some-name-rs0.pitr-11162 + local postfix= + local suffix= + local database=myApp + local collection=test + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-1.some-name-rs0.pitr-11162 mongodb '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-1.some-name-rs0.pitr-11162 + local driver=mongodb + local suffix=.svc.cluster.local + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.vgivc37vNw +++ mktemp ++ local LAST_ERR=/tmp/tmp.hBYQ4jhrLj ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.vgivc37vNw ++ cat /tmp/tmp.hBYQ4jhrLj ++ rm /tmp/tmp.vgivc37vNw /tmp/tmp.hBYQ4jhrLj ++ return 0 + local client_container=psmdb-client-7469665986-8j874 + local mongo_flag= + [[ myApp:myPass@some-name-rs0-1.some-name-rs0.pitr-11162 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-7469665986-8j874 -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-1.some-name-rs0.pitr-11162.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.F1iVhO5fm0 ++ mktemp + local LAST_ERR=/tmp/tmp.F2HYurl3Yw + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-7469665986-8j874 -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-1.some-name-rs0.pitr-11162.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.F1iVhO5fm0 + cat /tmp/tmp.F2HYurl3Yw + rm /tmp/tmp.F1iVhO5fm0 /tmp/tmp.F2HYurl3Yw + return 0 + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1560/e2e-tests/pitr/compare/find.json /tmp/tmp.4exwRjQ5eQ/find + compare_mongo_cmd find myApp:myPass@some-name-rs0-2.some-name-rs0.pitr-11162 '' + local command=find + local uri=myApp:myPass@some-name-rs0-2.some-name-rs0.pitr-11162 + local postfix= + local suffix= + local database=myApp + local collection=test + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-2.some-name-rs0.pitr-11162 mongodb '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-2.some-name-rs0.pitr-11162 + local driver=mongodb + local suffix=.svc.cluster.local + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local 
LAST_OUT=/tmp/tmp.trmoXzWbO0 +++ mktemp ++ local LAST_ERR=/tmp/tmp.7HBH6KWNCh ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.trmoXzWbO0 ++ cat /tmp/tmp.7HBH6KWNCh ++ rm /tmp/tmp.trmoXzWbO0 /tmp/tmp.7HBH6KWNCh ++ return 0 + local client_container=psmdb-client-7469665986-8j874 + local mongo_flag= + [[ myApp:myPass@some-name-rs0-2.some-name-rs0.pitr-11162 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-7469665986-8j874 -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-2.some-name-rs0.pitr-11162.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.ER6Rkrjfvc ++ mktemp + local LAST_ERR=/tmp/tmp.74fxDL1e4T + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-7469665986-8j874 -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-2.some-name-rs0.pitr-11162.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.ER6Rkrjfvc + cat /tmp/tmp.74fxDL1e4T + rm /tmp/tmp.ER6Rkrjfvc /tmp/tmp.74fxDL1e4T + return 0 + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1560/e2e-tests/pitr/compare/find.json /tmp/tmp.4exwRjQ5eQ/find + run_backup backup-minio 3 + local name=backup-minio + local idx=3 + desc 'run backup backup-minio-3' + set +o xtrace ----------------------------------------------------------------------------------- run backup backup-minio-3 ----------------------------------------------------------------------------------- + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1560/e2e-tests/pitr/conf/backup-minio.yml + /usr/bin/sed -e 's/name:/name: backup-minio-3/' + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.yiJUEKr9St ++ mktemp + local LAST_ERR=/tmp/tmp.g0dvw8xOWz + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.yiJUEKr9St perconaservermongodbbackup.psmdb.percona.com/backup-minio-3 created + cat /tmp/tmp.g0dvw8xOWz + rm /tmp/tmp.yiJUEKr9St /tmp/tmp.g0dvw8xOWz + return 0 + wait_backup backup-minio-3 + local backup_name=backup-minio-3 + set +o xtrace backup-minio-3................ 
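The compare_latest_restorable_time step that follows (the same check already run for backup-minio-2) verifies that the end of the last PITR chunk reported by PBM matches the latestRestorableTime the operator publishes on the PerconaServerMongoDBBackup object. A minimal sketch of that check, distilled from the commands traced in this run (cluster, pod, and backup names are this run's; the jq path and jsonpath are exactly as executed; the real helper additionally polls until two consecutive PBM reads agree and retries up to 30 times):

# sketch only -- reconstructed from the trace, not taken from the test source itself
cluster=some-name-rs0
backup=backup-minio-3

# end of the newest PITR chunk, as epoch seconds, read from pbm inside the backup-agent container
end_ts=$(kubectl exec "${cluster}-0" -c backup-agent -- pbm status -o json \
  | jq '.backups.pitrChunks.pitrChunks | last | .range.end')

# convert the epoch value to the ISO-8601 form used in the backup status
latest_restorable_time=$(date -u -d "@${end_ts}" +%Y-%m-%dT%H:%M:%SZ)

# the operator's view of the same value
backup_time=$(kubectl get psmdb-backup "${backup}" -o 'jsonpath={.status.latestRestorableTime}')

[ "${backup_time}" = "${latest_restorable_time}" ] || { echo "restorable time mismatch"; exit 1; }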
+ compare_latest_restorable_time some-name-rs0 backup-minio-3 + local cluster=some-name-rs0 + local backup_name=backup-minio-3 + local latest_restorable_time + local backup_time ++ get_latest_restorable_time some-name-rs0 ++ local cluster=some-name-rs0 ++ local first_timestamp ++ local second_timestamp ++ local retry=0 ++ [[ '' != '' ]] +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.aUMs1ezIig ++++ mktemp +++ local LAST_ERR=/tmp/tmp.IKegM59mxx +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.aUMs1ezIig +++ cat /tmp/tmp.IKegM59mxx +++ rm /tmp/tmp.aUMs1ezIig /tmp/tmp.IKegM59mxx +++ return 0 ++ first_timestamp=1717516708 ++ sleep 5 ++ [[ 1717516708 != '' ]] ++ [[ 1717516708 != \n\u\l\l ]] +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.NG5Eu36Ajx ++++ mktemp +++ local LAST_ERR=/tmp/tmp.R4EI1mmj8q +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.NG5Eu36Ajx +++ cat /tmp/tmp.R4EI1mmj8q +++ rm /tmp/tmp.NG5Eu36Ajx /tmp/tmp.R4EI1mmj8q +++ return 0 ++ second_timestamp=1717516708 ++ let retry+=1 ++ [[ 1 -gt 30 ]] ++ [[ 1717516708 != '' ]] ++ [[ 1717516708 != \n\u\l\l ]] ++ [[ 1717516708 == 1717516708 ]] ++ /usr/bin/date -u -d @1717516708 +%Y-%m-%dT%H:%M:%SZ + latest_restorable_time=2024-06-04T15:58:28Z ++ get_latest_restorable_time_from_backup_object backup-minio-3 ++ local backup_name=backup-minio-3 ++ local latestRestorableTime ++ local retry=0 ++ [[ '' != '' ]] ++ sleep 5 +++ kubectl_bin get psmdb-backup backup-minio-3 -o 'jsonpath={.status.latestRestorableTime}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.re3PipOYY1 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.HkqPtq6GTz +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get psmdb-backup backup-minio-3 -o 'jsonpath={.status.latestRestorableTime}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.re3PipOYY1 +++ cat /tmp/tmp.HkqPtq6GTz +++ rm /tmp/tmp.re3PipOYY1 /tmp/tmp.HkqPtq6GTz +++ return 0 ++ latestRestorableTime=2024-06-04T15:58:28Z ++ let retry+=1 ++ [[ 1 -gt 30 ]] ++ [[ 2024-06-04T15:58:28Z != '' ]] ++ [[ 2024-06-04T15:58:28Z != \n\u\l\l ]] ++ echo 2024-06-04T15:58:28Z + backup_time=2024-06-04T15:58:28Z + [[ 2024-06-04T15:58:28Z != \2\0\2\4\-\0\6\-\0\4\T\1\5\:\5\8\:\2\8\Z ]] + check_recovery backup-minio-3 latest '' -4th backupSource some-name + local backup_name=backup-minio-3 + local restore_type=latest + local restore_date= + local cmp_postfix=-4th + local backupSource=backupSource + local cluster_name=some-name + desc 'write more data before restore by latest' + set +o xtrace ----------------------------------------------------------------------------------- write more data before restore by latest ----------------------------------------------------------------------------------- + sleep 60 + run_mongo 'use myApp\n db.test.insert({ x: 100501 })' myApp:myPass@some-name-rs0.pitr-11162 + local 'command=use myApp\n 
db.test.insert({ x: 100501 })' + local uri=myApp:myPass@some-name-rs0.pitr-11162 + local driver=mongodb+srv + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.zyCXzsAnyl +++ mktemp ++ local LAST_ERR=/tmp/tmp.VeZmWYgOOa ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.zyCXzsAnyl ++ cat /tmp/tmp.VeZmWYgOOa ++ rm /tmp/tmp.zyCXzsAnyl /tmp/tmp.VeZmWYgOOa ++ return 0 + local client_container=psmdb-client-7469665986-8j874 + local mongo_flag= + [[ myApp:myPass@some-name-rs0.pitr-11162 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-7469665986-8j874 -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100501 })\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.pitr-11162.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.8KYelWRU2Q ++ mktemp + local LAST_ERR=/tmp/tmp.pVcUp58XaN + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-7469665986-8j874 -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100501 })\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.pitr-11162.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.8KYelWRU2Q Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-rs0-1.some-name-rs0.pitr-11162.svc.cluster.local:27017,some-name-rs0-2.some-name-rs0.pitr-11162.svc.cluster.local:27017,some-name-rs0-0.some-name-rs0.pitr-11162.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("4b6a1b9d-c7e4-44e8-8867-2fb744e84df6") } Percona Server for MongoDB server version: v7.0.11-6 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.pVcUp58XaN + rm /tmp/tmp.8KYelWRU2Q /tmp/tmp.pVcUp58XaN + return 0 + desc 'waiting for chunks to be uploaded' + set +o xtrace ----------------------------------------------------------------------------------- waiting for chunks to be uploaded ----------------------------------------------------------------------------------- + sleep 120 + desc 'check restore by latest' + set +o xtrace ----------------------------------------------------------------------------------- check restore by latest ----------------------------------------------------------------------------------- + '[' -z backupSource ']' ++ get_backup_dest backup-minio-3 ++ local backup_name=backup-minio-3 ++ sed -e 's/.json$//' ++ sed 's|azure://||' ++ sed 's|s3://||' ++ kubectl_bin get psmdb-backup backup-minio-3 -o 'jsonpath={.status.destination}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.maeq8tpfqt +++ mktemp ++ local LAST_ERR=/tmp/tmp.OHI91D48Qk ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb-backup backup-minio-3 -o 'jsonpath={.status.destination}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.maeq8tpfqt ++ cat /tmp/tmp.OHI91D48Qk ++ rm /tmp/tmp.maeq8tpfqt /tmp/tmp.OHI91D48Qk ++ return 0 + backup_dest=operator-testing/2024-06-04T15:59:28Z + cat 
/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1560/e2e-tests/pitr/conf/restore.yml + /usr/bin/sed -e 's/name:/name: restore-backup-minio-3/' + /usr/bin/sed -e /backupName/d + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.GC2dIOWtWc ++ mktemp + local LAST_ERR=/tmp/tmp.eefwhuQtst + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + /usr/bin/sed -e 's/type:/type: latest/' + '[' -z '' ']' + /usr/bin/sed -e /date:/d + /usr/bin/sed -e 's|BACKUP-NAME|operator-testing/2024-06-04T15:59:28Z|' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.GC2dIOWtWc perconaservermongodbrestore.psmdb.percona.com/restore-backup-minio-3 created + cat /tmp/tmp.eefwhuQtst + rm /tmp/tmp.GC2dIOWtWc /tmp/tmp.eefwhuQtst + return 0 + wait_restore backup-minio-3 some-name + local backup_name=backup-minio-3 + local cluster_name=some-name + local target_state=ready + local wait_cluster_consistency=1 + local wait_time=780 + set +o xtrace waiting psmdb-restore/backup-minio-3 to reach ready state........... + '[' 1 -eq 1 ']' + wait_cluster_consistency some-name + local cluster_name=some-name + local wait_time=32 + retry=0 + sleep 7 + echo -n 'waiting for cluster readyness' waiting for cluster readyness++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.r8iJZ4NWLm +++ mktemp ++ local LAST_ERR=/tmp/tmp.2a0NyMx5dd ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.r8iJZ4NWLm ++ cat /tmp/tmp.2a0NyMx5dd ++ rm /tmp/tmp.r8iJZ4NWLm /tmp/tmp.2a0NyMx5dd ++ return 0 + [[ ready == \r\e\a\d\y ]] + echo + set -o xtrace + compare_mongo_cmd find myApp:myPass@some-name-rs0-0.some-name-rs0.pitr-11162 -4th + local command=find + local uri=myApp:myPass@some-name-rs0-0.some-name-rs0.pitr-11162 + local postfix=-4th + local suffix= + local database=myApp + local collection=test + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-0.some-name-rs0.pitr-11162 mongodb '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-0.some-name-rs0.pitr-11162 + local driver=mongodb + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.l0Dux9uChp +++ mktemp ++ local LAST_ERR=/tmp/tmp.619xp1CoYY ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.l0Dux9uChp ++ cat /tmp/tmp.619xp1CoYY ++ rm /tmp/tmp.l0Dux9uChp /tmp/tmp.619xp1CoYY ++ return 0 + local client_container=psmdb-client-7469665986-8j874 + local mongo_flag= + [[ myApp:myPass@some-name-rs0-0.some-name-rs0.pitr-11162 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-7469665986-8j874 -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo 
mongodb://myApp:myPass@some-name-rs0-0.some-name-rs0.pitr-11162.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.U1KCNWCWYE ++ mktemp + local LAST_ERR=/tmp/tmp.t4VeRuxtiY + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-7469665986-8j874 -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-0.some-name-rs0.pitr-11162.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.U1KCNWCWYE + cat /tmp/tmp.t4VeRuxtiY + rm /tmp/tmp.U1KCNWCWYE /tmp/tmp.t4VeRuxtiY + return 0 + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1560/e2e-tests/pitr/compare/find-4th.json /tmp/tmp.4exwRjQ5eQ/find-4th + compare_mongo_cmd find myApp:myPass@some-name-rs0-1.some-name-rs0.pitr-11162 -4th + local command=find + local uri=myApp:myPass@some-name-rs0-1.some-name-rs0.pitr-11162 + local postfix=-4th + local suffix= + local database=myApp + local collection=test + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-1.some-name-rs0.pitr-11162 mongodb '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-1.some-name-rs0.pitr-11162 + local driver=mongodb + local suffix=.svc.cluster.local + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.2skdL0RWCZ +++ mktemp ++ local LAST_ERR=/tmp/tmp.7ptpHQPraF ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.2skdL0RWCZ ++ cat /tmp/tmp.7ptpHQPraF ++ rm /tmp/tmp.2skdL0RWCZ /tmp/tmp.7ptpHQPraF ++ return 0 + local client_container=psmdb-client-7469665986-8j874 + local mongo_flag= + [[ myApp:myPass@some-name-rs0-1.some-name-rs0.pitr-11162 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-7469665986-8j874 -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-1.some-name-rs0.pitr-11162.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.OGQB2Xo8wT ++ mktemp + local LAST_ERR=/tmp/tmp.XpEcBco3h2 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-7469665986-8j874 -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-1.some-name-rs0.pitr-11162.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.OGQB2Xo8wT + cat /tmp/tmp.XpEcBco3h2 + rm /tmp/tmp.OGQB2Xo8wT /tmp/tmp.XpEcBco3h2 + return 0 + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1560/e2e-tests/pitr/compare/find-4th.json /tmp/tmp.4exwRjQ5eQ/find-4th + compare_mongo_cmd find myApp:myPass@some-name-rs0-2.some-name-rs0.pitr-11162 -4th + local command=find + local uri=myApp:myPass@some-name-rs0-2.some-name-rs0.pitr-11162 + local postfix=-4th + local suffix= + local database=myApp + local collection=test + run_mongo 'use myApp\n db.test.find()' 
myApp:myPass@some-name-rs0-2.some-name-rs0.pitr-11162 mongodb '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-2.some-name-rs0.pitr-11162 + local driver=mongodb + local suffix=.svc.cluster.local + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.1YJJaAx3Hz +++ mktemp ++ local LAST_ERR=/tmp/tmp.YhXTCEV0mT ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.1YJJaAx3Hz ++ cat /tmp/tmp.YhXTCEV0mT ++ rm /tmp/tmp.1YJJaAx3Hz /tmp/tmp.YhXTCEV0mT ++ return 0 + local client_container=psmdb-client-7469665986-8j874 + local mongo_flag= + [[ myApp:myPass@some-name-rs0-2.some-name-rs0.pitr-11162 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-7469665986-8j874 -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-2.some-name-rs0.pitr-11162.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.dpLIgTu7qN ++ mktemp + local LAST_ERR=/tmp/tmp.thBi2wXeUc + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-7469665986-8j874 -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-2.some-name-rs0.pitr-11162.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.dpLIgTu7qN + cat /tmp/tmp.thBi2wXeUc + rm /tmp/tmp.dpLIgTu7qN /tmp/tmp.thBi2wXeUc + return 0 + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1560/e2e-tests/pitr/compare/find-4th.json /tmp/tmp.4exwRjQ5eQ/find-4th + kubectl patch psmdb some-name --type=merge --patch '{"spec": {"backup": {"pitr": {"enabled": false}}}}' perconaservermongodb.psmdb.percona.com/some-name patched + sleep 20 + kubectl_bin delete psmdb-backup --all ++ mktemp + local LAST_OUT=/tmp/tmp.tiKjzLE7ki ++ mktemp + local LAST_ERR=/tmp/tmp.5hLggycrV6 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete psmdb-backup --all + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.tiKjzLE7ki perconaservermongodbbackup.psmdb.percona.com "backup-minio-0" deleted perconaservermongodbbackup.psmdb.percona.com "backup-minio-1" deleted perconaservermongodbbackup.psmdb.percona.com "backup-minio-2" deleted perconaservermongodbbackup.psmdb.percona.com "backup-minio-3" deleted + cat /tmp/tmp.5hLggycrV6 + rm /tmp/tmp.tiKjzLE7ki /tmp/tmp.5hLggycrV6 + return 0 + destroy pitr-11162 + local namespace=pitr-11162 + local ignore_logs=true + desc 'destroy cluster/operator and all other resources' + set +o xtrace ----------------------------------------------------------------------------------- destroy cluster/operator and all other resources ----------------------------------------------------------------------------------- + '[' true == false ']' + delete_crd + desc 'get and delete old CRDs and RBAC' + set +o xtrace 
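At this point the functional part of the test is done: point-in-time recovery is switched off on the cluster and every backup object is removed before destroy tears down CRDs, RBAC, and the namespaces. In distilled form (names and the 20-second pause are taken from this run's trace):

# sketch reconstructed from the trace above
kubectl patch psmdb some-name --type=merge \
  --patch '{"spec": {"backup": {"pitr": {"enabled": false}}}}'
sleep 20                           # pause taken from the trace, presumably to let pbm-agent stop the PITR routine
kubectl delete psmdb-backup --all  # removes backup-minio-0 through backup-minio-3 in namespace pitr-11162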
----------------------------------------------------------------------------------- get and delete old CRDs and RBAC ----------------------------------------------------------------------------------- + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1560/deploy/crd.yaml --ignore-not-found --wait=false ++ mktemp + local LAST_OUT=/tmp/tmp.Lu3V4b6q4F ++ mktemp + local LAST_ERR=/tmp/tmp.opaJkSR3t8 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1560/deploy/crd.yaml --ignore-not-found --wait=false + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Lu3V4b6q4F customresourcedefinition.apiextensions.k8s.io "perconaservermongodbbackups.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbrestores.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbs.psmdb.percona.com" deleted + cat /tmp/tmp.opaJkSR3t8 + rm /tmp/tmp.Lu3V4b6q4F /tmp/tmp.opaJkSR3t8 + return 0 ++ yq eval .metadata.name /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1560/deploy/crd.yaml ++ grep -v '\-\-\-' + for crd_name in '$(yq eval '\''.metadata.name'\'' "${src_dir}/deploy/crd.yaml" | grep -v '\''\-\-\-'\'')' + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbbackups.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' + grep -v NAMESPACE + kubectl get perconaservermongodbbackups.psmdb.percona.com --all-namespaces -o wide error: the server doesn't have a resource type "perconaservermongodbbackups" + kubectl patch perconaservermongodbbackups.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' E0604 16:04:56.989404 8380 memcache.go:287] couldn't get resource list for psmdb.percona.com/v1: the server could not find the requested resource E0604 16:04:57.111368 8380 memcache.go:287] couldn't get resource list for psmdb.percona.com/v1-12-0: the server could not find the requested resource E0604 16:04:57.114343 8380 memcache.go:287] couldn't get resource list for psmdb.percona.com/v1-10-0: the server could not find the requested resource E0604 16:04:57.115547 8380 memcache.go:287] couldn't get resource list for psmdb.percona.com/v1-11-0: the server could not find the requested resource error: the server doesn't have a resource type "perconaservermongodbbackups" + : + kubectl_bin wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.XKpDsShPRM ++ mktemp + local LAST_ERR=/tmp/tmp.9kdYdHxgoS + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.XKpDsShPRM + cat /tmp/tmp.9kdYdHxgoS + rm /tmp/tmp.XKpDsShPRM /tmp/tmp.9kdYdHxgoS + return 0 + for crd_name in '$(yq eval '\''.metadata.name'\'' "${src_dir}/deploy/crd.yaml" | grep -v '\''\-\-\-'\'')' + kubectl get perconaservermongodbrestores.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbrestores.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbrestores" + kubectl patch perconaservermongodbrestores.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type 
"perconaservermongodbrestores" + : + kubectl_bin wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.vTDApesnEs ++ mktemp + local LAST_ERR=/tmp/tmp.6Pv5pV6sE6 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.vTDApesnEs + cat /tmp/tmp.6Pv5pV6sE6 + rm /tmp/tmp.vTDApesnEs /tmp/tmp.6Pv5pV6sE6 + return 0 + for crd_name in '$(yq eval '\''.metadata.name'\'' "${src_dir}/deploy/crd.yaml" | grep -v '\''\-\-\-'\'')' + kubectl get perconaservermongodbs.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbs.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbs" + kubectl patch perconaservermongodbs.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbs" + : + kubectl_bin wait --for=delete crd perconaservermongodbs.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.Z2UIA19yCf ++ mktemp + local LAST_ERR=/tmp/tmp.GywPo67e9F + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=delete crd perconaservermongodbs.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Z2UIA19yCf + cat /tmp/tmp.GywPo67e9F + rm /tmp/tmp.Z2UIA19yCf /tmp/tmp.GywPo67e9F + return 0 + local rbac_yaml=rbac.yaml + '[' -n psmdb-operator ']' + rbac_yaml=cw-rbac.yaml + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1560/deploy/cw-rbac.yaml --ignore-not-found ++ mktemp + local LAST_OUT=/tmp/tmp.wfJVg5PYDZ ++ mktemp + local LAST_ERR=/tmp/tmp.SH0lqpopNS + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1560/deploy/cw-rbac.yaml --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.wfJVg5PYDZ clusterrole.rbac.authorization.k8s.io "percona-server-mongodb-operator" deleted clusterrolebinding.rbac.authorization.k8s.io "service-account-percona-server-mongodb-operator" deleted + cat /tmp/tmp.SH0lqpopNS + rm /tmp/tmp.wfJVg5PYDZ /tmp/tmp.SH0lqpopNS + return 0 + kubectl_bin delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml + : + '[' -n '' ']' + '[' -n psmdb-operator ']' + kubectl_bin delete --grace-period=0 --force=true namespace pitr-11162 + rm -rf /tmp/tmp.4exwRjQ5eQ + kubectl_bin delete --grace-period=0 --force=true namespace psmdb-operator ++ mktemp ++ mktemp + local LAST_OUT=/tmp/tmp.12WkjfHn3s ++ mktemp + desc 'test passed' + set +o xtrace ----------------------------------------------------------------------------------- test passed ----------------------------------------------------------------------------------- + local LAST_OUT=/tmp/tmp.rSS7FXwomr + local LAST_ERR=/tmp/tmp.EB3ILCLGE1 + local exit_status=0 + local timeout=4 ++ mktemp + local LAST_ERR=/tmp/tmp.Ev2ukyjGA5 + local exit_status=0 + local timeout=4 ++ seq 0 2 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete --grace-period=0 --force=true namespace psmdb-operator + for i in '$(seq 0 2)' + set +e + kubectl delete --grace-period=0 --force=true namespace pitr-11162