Log: /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1566/e2e-tests/logs/pitr-sharded.log
WARNING: version difference between client (1.30) and server (1.26) exceeds the supported minor version skew of +/-1
WARNING: version difference between client (1.30) and server (1.26) exceeds the supported minor version skew of +/-1
WARNING: version difference between client (1.30) and server (1.26) exceeds the supported minor version skew of +/-1
+ main
+ create_infra pitr-sharded-26322
+ local ns=pitr-sharded-26322
+ delete_crd
+ desc 'get and delete old CRDs and RBAC'
+ set +o xtrace
-----------------------------------------------------------------------------------
get and delete old CRDs and RBAC
-----------------------------------------------------------------------------------
+ kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1566/deploy/crd.yaml --ignore-not-found --wait=false
++ mktemp
+ local LAST_OUT=/tmp/tmp.LtIS9ZM2wV
++ mktemp
+ local LAST_ERR=/tmp/tmp.i3ARSPBD92
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1566/deploy/crd.yaml --ignore-not-found --wait=false
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.LtIS9ZM2wV
customresourcedefinition.apiextensions.k8s.io "perconaservermongodbbackups.psmdb.percona.com" deleted
customresourcedefinition.apiextensions.k8s.io "perconaservermongodbrestores.psmdb.percona.com" deleted
customresourcedefinition.apiextensions.k8s.io "perconaservermongodbs.psmdb.percona.com" deleted
+ cat /tmp/tmp.i3ARSPBD92
+ rm /tmp/tmp.LtIS9ZM2wV /tmp/tmp.i3ARSPBD92
+ return 0
++ yq eval .metadata.name /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1566/deploy/crd.yaml
++ grep -v '\-\-\-'
+ for crd_name in '$(yq eval '\''.metadata.name'\'' "${src_dir}/deploy/crd.yaml" | grep -v '\''\-\-\-'\'')'
+ kubectl get perconaservermongodbbackups.psmdb.percona.com --all-namespaces -o wide
+ grep -v NAMESPACE
+ xargs -L 1 sh -xc 'kubectl patch perconaservermongodbbackups.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"'
error: the server doesn't have a resource type "perconaservermongodbbackups"
+ kubectl patch perconaservermongodbbackups.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}'
error: the server doesn't have a resource type "perconaservermongodbbackups"
+ :
+ kubectl_bin wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com
++ mktemp
+ local LAST_OUT=/tmp/tmp.zmD2umCAxY
++ mktemp
+ local LAST_ERR=/tmp/tmp.LkCP8JWAoG
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.zmD2umCAxY
+ cat /tmp/tmp.LkCP8JWAoG
+ rm /tmp/tmp.zmD2umCAxY /tmp/tmp.LkCP8JWAoG
+ return 0
+ for crd_name in '$(yq eval '\''.metadata.name'\'' "${src_dir}/deploy/crd.yaml" | grep -v '\''\-\-\-'\'')'
+ grep -v NAMESPACE
+ kubectl get perconaservermongodbrestores.psmdb.percona.com --all-namespaces -o wide
+ xargs -L 1 sh -xc 'kubectl patch perconaservermongodbrestores.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"'
error: the server doesn't have a resource type "perconaservermongodbrestores"
+ kubectl patch perconaservermongodbrestores.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}'
error: the server doesn't have a resource type "perconaservermongodbrestores"
+ :
+ kubectl_bin wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com
++ mktemp
+ local LAST_OUT=/tmp/tmp.I6dly1JXcE
++ mktemp
+ local LAST_ERR=/tmp/tmp.CcpmaED21K
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.I6dly1JXcE
+ cat /tmp/tmp.CcpmaED21K
+ rm /tmp/tmp.I6dly1JXcE /tmp/tmp.CcpmaED21K
+ return 0
+ for crd_name in '$(yq eval '\''.metadata.name'\'' "${src_dir}/deploy/crd.yaml" | grep -v '\''\-\-\-'\'')'
+ kubectl get perconaservermongodbs.psmdb.percona.com --all-namespaces -o wide
+ grep -v NAMESPACE
+ xargs -L 1 sh -xc 'kubectl patch perconaservermongodbs.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"'
error: the server doesn't have a resource type "perconaservermongodbs"
+ kubectl patch perconaservermongodbs.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}'
error: the server doesn't have a resource type "perconaservermongodbs"
+ :
+ kubectl_bin wait --for=delete crd perconaservermongodbs.psmdb.percona.com
++ mktemp
+ local LAST_OUT=/tmp/tmp.ttZPT4SFFm
++ mktemp
+ local LAST_ERR=/tmp/tmp.XeyuMXSE5D
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl wait --for=delete crd perconaservermongodbs.psmdb.percona.com
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.ttZPT4SFFm
+ cat /tmp/tmp.XeyuMXSE5D
+ rm /tmp/tmp.ttZPT4SFFm /tmp/tmp.XeyuMXSE5D
+ return 0
+ local rbac_yaml=rbac.yaml
+ '[' -n psmdb-operator ']'
+ rbac_yaml=cw-rbac.yaml
+ kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1566/deploy/cw-rbac.yaml --ignore-not-found
++ mktemp
+ local LAST_OUT=/tmp/tmp.douKhFD1Oe
++ mktemp
+ local LAST_ERR=/tmp/tmp.0trb2xPHpj
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1566/deploy/cw-rbac.yaml --ignore-not-found
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.douKhFD1Oe
clusterrole.rbac.authorization.k8s.io "percona-server-mongodb-operator" deleted
clusterrolebinding.rbac.authorization.k8s.io "service-account-percona-server-mongodb-operator" deleted
+ cat /tmp/tmp.0trb2xPHpj
+ rm /tmp/tmp.douKhFD1Oe /tmp/tmp.0trb2xPHpj
+ return 0
+ check_crd_for_deletion PR-1566-1480088f
+ local git_tag=PR-1566-1480088f
++ curl -s https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/PR-1566-1480088f/deploy/crd.yaml
++ yq eval .metadata.name
++ /usr/bin/sed s/---//g
++ /usr/bin/sed ':a;N;$!ba;s/\n/ /g'
+ for crd_name in '$(curl -s https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/${git_tag}/deploy/crd.yaml | yq eval '\''.metadata.name'\'' | $sed '\''s/---//g'\'' | $sed '\'':a;N;$!ba;s/\n/ /g'\'')'
++ kubectl_bin get crd/null -o 'jsonpath={.status.conditions[-1].type}'
+++ mktemp
++ local LAST_OUT=/tmp/tmp.TU20PmA1Fz
+++ mktemp
++ local LAST_ERR=/tmp/tmp.AuSQbsjuYT
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in '$(seq 0 2)'
++ set +e
++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}'
++ exit_status=1
++ set -e
++ '[' 1 '!=' 0 -a -n 1 ']'
++ cat /tmp/tmp.TU20PmA1Fz
++ cat /tmp/tmp.AuSQbsjuYT
Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found
++ sleep 0
++ for i in '$(seq 0 2)'
++ set +e
++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}'
++ exit_status=1
++ set -e
++ '[' 1 '!=' 0 -a -n 1 ']'
++ cat /tmp/tmp.TU20PmA1Fz
++ cat /tmp/tmp.AuSQbsjuYT
Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found
++ sleep 4
++ for i in '$(seq 0 2)'
++ set +e
++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}'
++ exit_status=1
++ set -e
++ '[' 1 '!=' 0 -a -n 1 ']'
++ cat /tmp/tmp.TU20PmA1Fz
++ cat /tmp/tmp.AuSQbsjuYT
Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found
++ sleep 8
++ cat /tmp/tmp.TU20PmA1Fz
++ cat /tmp/tmp.AuSQbsjuYT
Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found
++ rm /tmp/tmp.TU20PmA1Fz /tmp/tmp.AuSQbsjuYT
++ return 1
+ [[ '' == \T\e\r\m\i\n\a\t\i\n\g ]]
+ '[' -n psmdb-operator ']'
+ create_namespace psmdb-operator
+ local namespace=psmdb-operator
+ local skip_clean_namespace=
+ [[ 1 == 1 ]]
+ [[ -z '' ]]
+ destroy_chaos_mesh
++ helm list --all-namespaces --filter chaos-mesh
++ tail -n1
++ sed s/NAMESPACE//
++ awk '-F ' '{print $2}'
+ local chaos_mesh_ns=
+ desc 'destroy chaos-mesh'
+ set +o xtrace
-----------------------------------------------------------------------------------
destroy chaos-mesh
-----------------------------------------------------------------------------------
+ '[' -n '' ']'
++ awk '{print $1}'
++ kubectl get MutatingWebhookConfiguration
++ grep chaos-mesh
+ timeout 30 kubectl delete MutatingWebhookConfiguration
error: resource(s) were provided, but no name was specified
+ :
++ kubectl get ValidatingWebhookConfiguration
++ grep chaos-mesh
++ awk '{print $1}'
+ timeout 30 kubectl delete ValidatingWebhookConfiguration
error: resource(s) were provided, but no name was specified
+ :
++ kubectl get ValidatingWebhookConfiguration
++ awk '{print $1}'
++ grep validate-auth
+ timeout 30 kubectl delete ValidatingWebhookConfiguration
error: resource(s) were provided, but no name was specified
+ :
++ kubectl api-resources
++ awk '{print $1}'
++ grep chaos-mesh
++ kubectl get crd
++ grep chaos-mesh.org
++ awk '{print $1}'
+ timeout 30 kubectl delete crd
error: resource(s) were provided, but no name was specified
+ :
++ kubectl get clusterrolebinding
++ grep chaos-mesh
++ awk '{print $1}'
+ timeout 30 kubectl delete clusterrolebinding
error: resource(s) were provided, but no name was specified
+ :
++ kubectl get clusterrole
++ awk '{print $1}'
++ grep chaos-mesh
+ timeout 30 kubectl delete clusterrole
error: resource(s) were provided, but no name was specified
+ :
+ desc 'cleaned up all old namespaces'
+ set +o xtrace
-----------------------------------------------------------------------------------
cleaned up all old namespaces
-----------------------------------------------------------------------------------
+ '[' -n '' ']'
+ desc 'cleaned up old namespaces psmdb-operator'
+ set +o xtrace
-----------------------------------------------------------------------------------
cleaned up old namespaces psmdb-operator
-----------------------------------------------------------------------------------
+ kubectl_bin delete namespace psmdb-operator --ignore-not-found
++ mktemp
+ local LAST_OUT=/tmp/tmp.qqQzZ5tLFn
+ kubectl_bin get ns
++ mktemp
+ local LAST_ERR=/tmp/tmp.Kud5tAwuQA
+ local exit_status=0
+ local timeout=4
++ mktemp
+ local LAST_OUT=/tmp/tmp.NpIKkvBxG3
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl delete namespace psmdb-operator --ignore-not-found
+ egrep -v '^kube-|^default|Terminating|psmdb-operator|openshift|gke-mcs|^NAME'
+ awk '{print$1}'
+ xargs kubectl delete ns
++ mktemp
+ local LAST_ERR=/tmp/tmp.ZIFH8Fkpbp
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl get ns
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.NpIKkvBxG3
+ cat /tmp/tmp.ZIFH8Fkpbp
+ rm /tmp/tmp.NpIKkvBxG3 /tmp/tmp.ZIFH8Fkpbp
+ return 0
namespace "pitr-sharded-25070" deleted
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.qqQzZ5tLFn
namespace "psmdb-operator" deleted
+ cat /tmp/tmp.Kud5tAwuQA
+ rm /tmp/tmp.qqQzZ5tLFn /tmp/tmp.Kud5tAwuQA
+ return 0
+ kubectl_bin wait --for=delete namespace psmdb-operator
++ mktemp
+ local LAST_OUT=/tmp/tmp.7y010e2ZOx
++ mktemp
+ local LAST_ERR=/tmp/tmp.Sbcbs8UTgU
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl wait --for=delete namespace psmdb-operator
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.7y010e2ZOx
+ cat /tmp/tmp.Sbcbs8UTgU
+ rm /tmp/tmp.7y010e2ZOx /tmp/tmp.Sbcbs8UTgU
+ return 0
+ desc 'create namespace psmdb-operator'
+ set +o xtrace
-----------------------------------------------------------------------------------
create namespace psmdb-operator
-----------------------------------------------------------------------------------
+ kubectl_bin create namespace psmdb-operator
++ mktemp
+ local LAST_OUT=/tmp/tmp.YqOMF8UaVL
++ mktemp
+ local LAST_ERR=/tmp/tmp.oE24iv13SR
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl create namespace psmdb-operator
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.YqOMF8UaVL
namespace/psmdb-operator created
+ cat /tmp/tmp.oE24iv13SR
+ rm /tmp/tmp.YqOMF8UaVL /tmp/tmp.oE24iv13SR
+ return 0
++ kubectl_bin config current-context
+++ mktemp
++ local LAST_OUT=/tmp/tmp.htZkdZadBU
+++ mktemp
++ local LAST_ERR=/tmp/tmp.bNvFx3X4nQ
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in '$(seq 0 2)'
++ set +e
++ kubectl config current-context
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 -a -n 1 ']'
++ break
++ cat /tmp/tmp.htZkdZadBU
++ cat /tmp/tmp.bNvFx3X4nQ
++ rm /tmp/tmp.htZkdZadBU /tmp/tmp.bNvFx3X4nQ
++ return 0
+ kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-1566-1480088f-7-cluster7 --namespace=psmdb-operator
++ mktemp
+ local LAST_OUT=/tmp/tmp.jARxjRt4DQ
++ mktemp
+ local LAST_ERR=/tmp/tmp.vR21VLcWYR
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-1566-1480088f-7-cluster7 --namespace=psmdb-operator
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.jARxjRt4DQ
Context "gke_cloud-dev-112233_us-central1-a_jen-psmdb-1566-1480088f-7-cluster7" modified.
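
Every kubectl_bin call in this trace expands to the same capture-and-retry boilerplate (mktemp, up to three attempts, cat of stdout/stderr, cleanup). A minimal sketch of that wrapper as inferred from the trace; the repo's actual helper may differ in details:

  # Inferred retry wrapper: capture output, retry up to three times,
  # sleeping longer between failures (the trace shows sleep 0, 4, 8).
  kubectl_bin() {
      local LAST_OUT LAST_ERR exit_status=0 timeout=4 i
      LAST_OUT=$(mktemp)
      LAST_ERR=$(mktemp)
      for i in $(seq 0 2); do
          set +e
          kubectl "$@" >"$LAST_OUT" 2>"$LAST_ERR"
          exit_status=$?
          set -e
          [ "$exit_status" -eq 0 ] && break
          cat "$LAST_OUT"
          cat "$LAST_ERR" >&2
          sleep $((timeout * i))
      done
      cat "$LAST_OUT"
      cat "$LAST_ERR" >&2
      rm "$LAST_OUT" "$LAST_ERR"
      return $exit_status
  }
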
+ cat /tmp/tmp.vR21VLcWYR
+ rm /tmp/tmp.jARxjRt4DQ /tmp/tmp.vR21VLcWYR
+ return 0
+ deploy_operator
+ desc 'start PSMDB operator'
+ set +o xtrace
-----------------------------------------------------------------------------------
start PSMDB operator
-----------------------------------------------------------------------------------
+ local cr_file
+ '[' -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1566/e2e-tests/pitr-sharded/conf/crd.yaml ']'
+ cr_file=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1566/deploy/crd.yaml
+ kubectl_bin apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1566/deploy/crd.yaml
++ mktemp
+ local LAST_OUT=/tmp/tmp.7xg1XpJa6P
++ mktemp
+ local LAST_ERR=/tmp/tmp.Qrf5JeWXDk
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1566/deploy/crd.yaml
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.7xg1XpJa6P
customresourcedefinition.apiextensions.k8s.io/perconaservermongodbbackups.psmdb.percona.com serverside-applied
customresourcedefinition.apiextensions.k8s.io/perconaservermongodbrestores.psmdb.percona.com serverside-applied
customresourcedefinition.apiextensions.k8s.io/perconaservermongodbs.psmdb.percona.com serverside-applied
+ cat /tmp/tmp.Qrf5JeWXDk
+ rm /tmp/tmp.7xg1XpJa6P /tmp/tmp.Qrf5JeWXDk
+ return 0
+ '[' -n psmdb-operator ']'
+ apply_rbac cw-rbac
+ local operator_namespace=psmdb-operator
+ local rbac=cw-rbac
+ cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1566/deploy/cw-rbac.yaml
+ sed -e 's^namespace: .*^namespace: psmdb-operator^'
+ kubectl_bin apply -n psmdb-operator -f -
++ mktemp
+ local LAST_OUT=/tmp/tmp.SjlJhsEorq
++ mktemp
+ local LAST_ERR=/tmp/tmp.iYg3dPyMOz
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl apply -n psmdb-operator -f -
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.SjlJhsEorq
clusterrole.rbac.authorization.k8s.io/percona-server-mongodb-operator created
serviceaccount/percona-server-mongodb-operator created
clusterrolebinding.rbac.authorization.k8s.io/service-account-percona-server-mongodb-operator created
+ cat /tmp/tmp.iYg3dPyMOz
+ rm /tmp/tmp.SjlJhsEorq /tmp/tmp.iYg3dPyMOz
+ return 0
+ kubectl_bin apply -f -
+ yq eval ' (.spec.template.spec.containers[].image = "perconalab/percona-server-mongodb-operator:PR-1566-1480088f") | ((.. | select(.[] == "DISABLE_TELEMETRY")) |= .value="true") | ((.. | select(.[] == "LOG_LEVEL")) |= .value="DEBUG")' /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1566/deploy/cw-operator.yaml
++ mktemp
+ local LAST_OUT=/tmp/tmp.lw4BjPwY6t
++ mktemp
+ local LAST_ERR=/tmp/tmp.CKblTLPO0j
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl apply -f -
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.lw4BjPwY6t
deployment.apps/percona-server-mongodb-operator created
+ cat /tmp/tmp.CKblTLPO0j
+ rm /tmp/tmp.lw4BjPwY6t /tmp/tmp.CKblTLPO0j
+ return 0
+ sleep 2
++ get_operator_pod
++ kubectl_bin get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator
+++ mktemp
++ local LAST_OUT=/tmp/tmp.IrDYBiwfwM
+++ mktemp
++ local LAST_ERR=/tmp/tmp.r4NIg4H8Oq
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in '$(seq 0 2)'
++ set +e
++ kubectl get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 -a -n 1 ']'
++ break
++ cat /tmp/tmp.IrDYBiwfwM
++ cat /tmp/tmp.r4NIg4H8Oq
++ rm /tmp/tmp.IrDYBiwfwM /tmp/tmp.r4NIg4H8Oq
++ return 0
+ wait_pod percona-server-mongodb-operator-75f5b5bffd-nzv5m
+ local pod=percona-server-mongodb-operator-75f5b5bffd-nzv5m
+ set +o xtrace
waiting for pod/percona-server-mongodb-operator-75f5b5bffd-nzv5m to be ready.OK
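
The operator Deployment applied just above is patched in-stream before it reaches the cluster. For reference, the same yq edits written out as a standalone command (paths shortened relative to the workspace):

  # Pin the operator image to the PR build and force telemetry off,
  # debug logging on, exactly as the trace above shows.
  yq eval '
      (.spec.template.spec.containers[].image = "perconalab/percona-server-mongodb-operator:PR-1566-1480088f") |
      ((.. | select(.[] == "DISABLE_TELEMETRY")) |= .value="true") |
      ((.. | select(.[] == "LOG_LEVEL")) |= .value="DEBUG")' \
      deploy/cw-operator.yaml | kubectl apply -f -
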
+ create_namespace pitr-sharded-26322
+ local namespace=pitr-sharded-26322
+ local skip_clean_namespace=
+ [[ 1 == 1 ]]
+ [[ -z '' ]]
+ destroy_chaos_mesh
++ helm list --all-namespaces --filter chaos-mesh
++ tail -n1
++ sed s/NAMESPACE//
++ awk '-F ' '{print $2}'
+ local chaos_mesh_ns=
+ desc 'destroy chaos-mesh'
+ set +o xtrace
-----------------------------------------------------------------------------------
destroy chaos-mesh
-----------------------------------------------------------------------------------
+ '[' -n '' ']'
++ kubectl get MutatingWebhookConfiguration
++ awk '{print $1}'
++ grep chaos-mesh
+ timeout 30 kubectl delete MutatingWebhookConfiguration
error: resource(s) were provided, but no name was specified
+ :
++ kubectl get ValidatingWebhookConfiguration
++ grep chaos-mesh
++ awk '{print $1}'
+ timeout 30 kubectl delete ValidatingWebhookConfiguration
error: resource(s) were provided, but no name was specified
+ :
++ kubectl get ValidatingWebhookConfiguration
++ grep validate-auth
++ awk '{print $1}'
+ timeout 30 kubectl delete ValidatingWebhookConfiguration
error: resource(s) were provided, but no name was specified
+ :
++ kubectl api-resources
++ awk '{print $1}'
++ grep chaos-mesh
++ kubectl get crd
++ grep chaos-mesh.org
++ awk '{print $1}'
+ timeout 30 kubectl delete crd
error: resource(s) were provided, but no name was specified
+ :
++ kubectl get clusterrolebinding
++ awk '{print $1}'
++ grep chaos-mesh
+ timeout 30 kubectl delete clusterrolebinding
error: resource(s) were provided, but no name was specified
+ :
++ kubectl get clusterrole
++ grep chaos-mesh
++ awk '{print $1}'
+ timeout 30 kubectl delete clusterrole
error: resource(s) were provided, but no name was specified
+ :
+ desc 'cleaned up all old namespaces'
+ set +o xtrace
-----------------------------------------------------------------------------------
cleaned up all old namespaces
-----------------------------------------------------------------------------------
+ kubectl_bin get ns
++ mktemp
+ '[' -n '' ']'
+ desc 'cleaned up old namespaces pitr-sharded-26322'
+ xargs kubectl delete ns
+ set +o xtrace
-----------------------------------------------------------------------------------
cleaned up old namespaces pitr-sharded-26322
-----------------------------------------------------------------------------------
+ kubectl_bin delete namespace pitr-sharded-26322 --ignore-not-found
+ awk '{print$1}'
++ mktemp
+ local LAST_OUT=/tmp/tmp.T4O7x0Yw88
+ egrep -v '^kube-|^default|Terminating|psmdb-operator|openshift|gke-mcs|^NAME'
+ local LAST_OUT=/tmp/tmp.h7AqBLmhMM
++ mktemp
++ mktemp
+ local LAST_ERR=/tmp/tmp.SivkrLnTFX
+ local exit_status=0
+ local timeout=4
+ local LAST_ERR=/tmp/tmp.uPOIW5Mj3p
+ local exit_status=0
+ local timeout=4
++ seq 0 2
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl get ns
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl delete namespace pitr-sharded-26322 --ignore-not-found
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.T4O7x0Yw88
+ cat /tmp/tmp.SivkrLnTFX
+ rm /tmp/tmp.T4O7x0Yw88 /tmp/tmp.SivkrLnTFX
+ return 0
error: resource(s) were provided, but no name was specified
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.h7AqBLmhMM
+ cat /tmp/tmp.uPOIW5Mj3p
+ rm /tmp/tmp.h7AqBLmhMM /tmp/tmp.uPOIW5Mj3p
+ return 0
+ kubectl_bin wait --for=delete namespace pitr-sharded-26322
++ mktemp
+ local LAST_OUT=/tmp/tmp.Q09VjhGkyB
++ mktemp
+ local LAST_ERR=/tmp/tmp.SPtuKF4RJr
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl wait --for=delete namespace pitr-sharded-26322
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.Q09VjhGkyB
+ cat /tmp/tmp.SPtuKF4RJr
+ rm /tmp/tmp.Q09VjhGkyB /tmp/tmp.SPtuKF4RJr
+ return 0
+ desc 'create namespace pitr-sharded-26322'
+ set +o xtrace
-----------------------------------------------------------------------------------
create namespace pitr-sharded-26322
-----------------------------------------------------------------------------------
+ kubectl_bin create namespace pitr-sharded-26322
++ mktemp
+ local LAST_OUT=/tmp/tmp.9xyfOJeLSw
++ mktemp
+ local LAST_ERR=/tmp/tmp.UYWDT1aByq
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl create namespace pitr-sharded-26322
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.9xyfOJeLSw
namespace/pitr-sharded-26322 created
+ cat /tmp/tmp.UYWDT1aByq
+ rm /tmp/tmp.9xyfOJeLSw /tmp/tmp.UYWDT1aByq
+ return 0
++ kubectl_bin config current-context
+++ mktemp
++ local LAST_OUT=/tmp/tmp.u1c4NGvjfc
+++ mktemp
++ local LAST_ERR=/tmp/tmp.lGHQPUXB2G
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in '$(seq 0 2)'
++ set +e
++ kubectl config current-context
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 -a -n 1 ']'
++ break
++ cat /tmp/tmp.u1c4NGvjfc
++ cat /tmp/tmp.lGHQPUXB2G
++ rm /tmp/tmp.u1c4NGvjfc /tmp/tmp.lGHQPUXB2G
++ return 0
+ kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-1566-1480088f-7-cluster7 --namespace=pitr-sharded-26322
++ mktemp
+ local LAST_OUT=/tmp/tmp.CuI7vC1rqN
++ mktemp
+ local LAST_ERR=/tmp/tmp.Ox2Xp2V3QZ
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-1566-1480088f-7-cluster7 --namespace=pitr-sharded-26322
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.CuI7vC1rqN
Context "gke_cloud-dev-112233_us-central1-a_jen-psmdb-1566-1480088f-7-cluster7" modified.
+ cat /tmp/tmp.Ox2Xp2V3QZ
+ rm /tmp/tmp.CuI7vC1rqN /tmp/tmp.Ox2Xp2V3QZ
+ return 0
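
Both destroy_chaos_mesh and the namespace sweep above rely on the same best-effort idiom: build a possibly empty name list with grep/awk, attempt the delete under a timeout, and swallow the resulting "no name was specified" error with ":". A sketch of the pattern (not verbatim repo code):

  # Best-effort delete: an empty grep result makes kubectl fail
  # harmlessly; ":" keeps "set -e" scripts from aborting.
  names=$(kubectl get clusterrolebinding | grep chaos-mesh | awk '{print $1}')
  timeout 30 kubectl delete clusterrolebinding $names || :
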
+ deploy_minio
+ desc 'install Minio'
+ set +o xtrace
-----------------------------------------------------------------------------------
install Minio
-----------------------------------------------------------------------------------
+ helm uninstall minio-service
Error: uninstall: Release not loaded: minio-service: release: not found
+ :
+ helm repo remove minio
"minio" has been removed from your repositories
+ helm repo add minio https://charts.min.io/
"minio" has been added to your repositories
+ retry 10 60 helm install minio-service --version 5.0.14 --set replicas=1 --set mode=standalone --set resources.requests.memory=256Mi --set rootUser=rootuser --set rootPassword=rootpass123 --set 'users[0].accessKey=some-access-key' --set 'users[0].secretKey=some-secret-key' --set 'users[0].policy=consoleAdmin' --set service.type=ClusterIP --set configPathmc=/tmp/.minio/ --set persistence.size=2G --set securityContext.enabled=false minio/minio
+ local max=10
+ local delay=60
+ shift 2
+ local n=1
+ helm install minio-service --version 5.0.14 --set replicas=1 --set mode=standalone --set resources.requests.memory=256Mi --set rootUser=rootuser --set rootPassword=rootpass123 --set 'users[0].accessKey=some-access-key' --set 'users[0].secretKey=some-secret-key' --set 'users[0].policy=consoleAdmin' --set service.type=ClusterIP --set configPathmc=/tmp/.minio/ --set persistence.size=2G --set securityContext.enabled=false minio/minio
NAME: minio-service
LAST DEPLOYED: Tue Jun 11 14:29:08 2024
NAMESPACE: pitr-sharded-26322
STATUS: deployed
REVISION: 1
TEST SUITE: None
NOTES:
MinIO can be accessed via port 9000 on the following DNS name from within your cluster:
minio-service.pitr-sharded-26322.svc.cluster.local

To access MinIO from localhost, run the below commands:

1. export POD_NAME=$(kubectl get pods --namespace pitr-sharded-26322 -l "release=minio-service" -o jsonpath="{.items[0].metadata.name}")
2. kubectl port-forward $POD_NAME 9000 --namespace pitr-sharded-26322

Read more about port forwarding here: http://kubernetes.io/docs/user-guide/kubectl/kubectl_port-forward/

You can now access MinIO server on http://localhost:9000. Follow the below steps to connect to MinIO server with mc client:

1. Download the MinIO mc client - https://min.io/docs/minio/linux/reference/minio-mc.html#quickstart
2. export MC_HOST_minio-service-local=http://$(kubectl get secret --namespace pitr-sharded-26322 minio-service -o jsonpath="{.data.rootUser}" | base64 --decode):$(kubectl get secret --namespace pitr-sharded-26322 minio-service -o jsonpath="{.data.rootPassword}" | base64 --decode)@localhost:9000
3. mc ls minio-service-local
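
The helm install above runs under a generic retry helper; the traced locals (max=10, delay=60, n=1) suggest roughly this shape (a sketch, not the repo's verbatim function):

  # Inferred helper: retry "$@" up to $max times, sleeping $delay
  # seconds between attempts.
  retry() {
      local max=$1
      local delay=$2
      shift 2
      local n=1
      until "$@"; do
          if [ "$n" -ge "$max" ]; then
              echo "retry: command failed after $n attempts" >&2
              return 1
          fi
          sleep "$delay"
          n=$((n + 1))
      done
  }
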
++ kubectl_bin get pods --selector=release=minio-service -o 'jsonpath={.items[].metadata.name}'
+++ mktemp
++ local LAST_OUT=/tmp/tmp.nmZpKc20er
+++ mktemp
++ local LAST_ERR=/tmp/tmp.EpKzrreP7i
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in '$(seq 0 2)'
++ set +e
++ kubectl get pods --selector=release=minio-service -o 'jsonpath={.items[].metadata.name}'
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 -a -n 1 ']'
++ break
++ cat /tmp/tmp.nmZpKc20er
++ cat /tmp/tmp.EpKzrreP7i
++ rm /tmp/tmp.nmZpKc20er /tmp/tmp.EpKzrreP7i
++ return 0
+ MINIO_POD=minio-service-57dd49b-hnnw4
+ wait_pod minio-service-57dd49b-hnnw4
+ local pod=minio-service-57dd49b-hnnw4
+ set +o xtrace
waiting for pod/minio-service-57dd49b-hnnw4 to be ready.OK
+ '[' -n psmdb-operator ']'
+ kubectl_bin create svc -n psmdb-operator externalname minio-service --external-name=minio-service.pitr-sharded-26322.svc.cluster.local --tcp=9000
++ mktemp
+ local LAST_OUT=/tmp/tmp.DJ9ZSQ2RwI
++ mktemp
+ local LAST_ERR=/tmp/tmp.enHlSGjffZ
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl create svc -n psmdb-operator externalname minio-service --external-name=minio-service.pitr-sharded-26322.svc.cluster.local --tcp=9000
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.DJ9ZSQ2RwI
service/minio-service created
+ cat /tmp/tmp.enHlSGjffZ
+ rm /tmp/tmp.DJ9ZSQ2RwI /tmp/tmp.enHlSGjffZ
+ return 0
+ kubectl_bin run -i --rm aws-cli --image=perconalab/awscli --restart=Never -- bash -c 'AWS_ACCESS_KEY_ID=some-access-key AWS_SECRET_ACCESS_KEY=some-secret-key AWS_DEFAULT_REGION=us-east-1 \
/usr/bin/aws --endpoint-url http://minio-service:9000 s3 mb s3://operator-testing'
++ mktemp
+ local LAST_OUT=/tmp/tmp.J24rSH6YgA
++ mktemp
+ local LAST_ERR=/tmp/tmp.EaeIfIzTsE
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl run -i --rm aws-cli --image=perconalab/awscli --restart=Never -- bash -c 'AWS_ACCESS_KEY_ID=some-access-key AWS_SECRET_ACCESS_KEY=some-secret-key AWS_DEFAULT_REGION=us-east-1 \
/usr/bin/aws --endpoint-url http://minio-service:9000 s3 mb s3://operator-testing'
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.J24rSH6YgA
make_bucket: operator-testing
pod "aws-cli" deleted
+ cat /tmp/tmp.EaeIfIzTsE
+ rm /tmp/tmp.J24rSH6YgA /tmp/tmp.EaeIfIzTsE
+ return 0
+ desc 'create secrets and start client'
+ set +o xtrace
-----------------------------------------------------------------------------------
create secrets and start client
-----------------------------------------------------------------------------------
+ kubectl_bin apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1566/e2e-tests/conf/secrets.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1566/e2e-tests/conf/client.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1566/e2e-tests/conf/minio-secret.yml
++ mktemp
+ local LAST_OUT=/tmp/tmp.j9hUbwNyOD
++ mktemp
+ local LAST_ERR=/tmp/tmp.VwsLUN51TD
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1566/e2e-tests/conf/secrets.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1566/e2e-tests/conf/client.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1566/e2e-tests/conf/minio-secret.yml
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.j9hUbwNyOD
secret/some-users created
deployment.apps/psmdb-client created
secret/minio-secret created
+ cat /tmp/tmp.VwsLUN51TD
+ rm /tmp/tmp.j9hUbwNyOD /tmp/tmp.VwsLUN51TD
+ return 0
+ desc 'create custom RuntimeClass'
+ set +o xtrace
-----------------------------------------------------------------------------------
create custom RuntimeClass
-----------------------------------------------------------------------------------
+ version_gt 1.19
++ echo '1.26 >= 1.19'
++ bc -l
+ '[' 1 -eq 1 ']'
+ return 0
+ '[' 0 -ne 1 ']'
+ cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1566/e2e-tests/conf/container-rc.yaml
+ /usr/bin/sed s/docker/runc/g
+ kubectl_bin apply -f -
++ mktemp
+ local LAST_OUT=/tmp/tmp.FRCnbErJpB
++ mktemp
+ local LAST_ERR=/tmp/tmp.UqLeUBBydv
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl apply -f -
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.FRCnbErJpB
runtimeclass.node.k8s.io/container-rc unchanged
+ cat /tmp/tmp.UqLeUBBydv
+ rm /tmp/tmp.FRCnbErJpB /tmp/tmp.UqLeUBBydv
+ return 0
+ cluster=some-name
+ desc 'create first PSMDB cluster some-name'
+ set +o xtrace
-----------------------------------------------------------------------------------
create first PSMDB cluster some-name
-----------------------------------------------------------------------------------
+ '[' 0 -eq 0 ']'
+ apply_cluster /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1566/e2e-tests/pitr-sharded/conf/some-name-rs0.yml
+ '[' -z '' ']'
+ cat_config /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1566/e2e-tests/pitr-sharded/conf/some-name-rs0.yml
+ cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1566/e2e-tests/pitr-sharded/conf/some-name-rs0.yml
+ yq eval '(.spec | select(has("initImage"))).initImage = "perconalab/percona-server-mongodb-operator:PR-1566-1480088f"'
+ yq eval '.spec.upgradeOptions.apply="Never"'
+ yq eval '(.spec | select(has("pmm"))).pmm.image = "perconalab/pmm-client:dev-latest"'
+ yq eval '(.spec | select(.image == null)).image = "perconalab/percona-server-mongodb-operator:main-mongod7.0"'
+ yq eval '(.spec | select(has("backup"))).backup.image = "perconalab/percona-server-mongodb-operator:main-backup"'
+ kubectl_bin apply -f -
++ mktemp
+ local LAST_OUT=/tmp/tmp.PkAm9PNOTk
++ mktemp
+ local LAST_ERR=/tmp/tmp.0ljlB0v12d
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl apply -f -
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.PkAm9PNOTk
perconaservermongodb.psmdb.percona.com/some-name created
+ cat /tmp/tmp.0ljlB0v12d
+ rm /tmp/tmp.PkAm9PNOTk /tmp/tmp.0ljlB0v12d
+ return 0
+ desc 'check if all 3 Pods started'
+ set +o xtrace
-----------------------------------------------------------------------------------
check if all 3 Pods started
-----------------------------------------------------------------------------------
+ wait_for_running some-name-rs0 3
+ local name=some-name-rs0
+ let last_pod=2
+ local check_cluster_readyness=true
+ set_debug
+ [[ 1 == 1 ]]
+ set -o xtrace
+ local rs_name=rs0
+ local cluster_name=some-name
++ seq 0 2
+ for i in '$(seq 0 $last_pod)'
+ [[ 0 -eq 2 ]]
+ wait_pod some-name-rs0-0
+ local pod=some-name-rs0-0
+ set +o xtrace
waiting for pod/some-name-rs0-0 to be ready.......OK
+ for i in '$(seq 0 $last_pod)'
+ [[ 1 -eq 2 ]]
+ wait_pod some-name-rs0-1
+ local pod=some-name-rs0-1
+ set +o xtrace
waiting for pod/some-name-rs0-1 to be ready......OK
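
The RuntimeClass step above is gated on version_gt 1.19, which the trace shows comparing versions through bc ('1.26 >= 1.19'). A sketch assuming the server minor version is held in a KUBE_VERSION variable (the variable name is an assumption, not confirmed by the trace):

  # Inferred version gate: bc prints 1 when the comparison holds.
  version_gt() {
      local required=$1
      [ "$(echo "$KUBE_VERSION >= $required" | bc -l)" -eq 1 ]
  }
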
+ for i in '$(seq 0 $last_pod)'
+ [[ 2 -eq 2 ]]
++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}'
+++ mktemp
++ local LAST_OUT=/tmp/tmp.dWKFyQlHkC
+++ mktemp
++ local LAST_ERR=/tmp/tmp.5UVAuJFCDY
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in '$(seq 0 2)'
++ set +e
++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}'
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 -a -n 1 ']'
++ break
++ cat /tmp/tmp.dWKFyQlHkC
++ cat /tmp/tmp.5UVAuJFCDY
++ rm /tmp/tmp.dWKFyQlHkC /tmp/tmp.5UVAuJFCDY
++ return 0
+ [[ '' == \t\r\u\e ]]
+ wait_pod some-name-rs0-2
+ local pod=some-name-rs0-2
+ set +o xtrace
waiting for pod/some-name-rs0-2 to be ready.........OK
++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}'
+++ mktemp
++ local LAST_OUT=/tmp/tmp.weWaRX2GaN
+++ mktemp
++ local LAST_ERR=/tmp/tmp.8C7ecPbhbd
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in '$(seq 0 2)'
++ set +e
++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}'
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 -a -n 1 ']'
++ break
++ cat /tmp/tmp.weWaRX2GaN
++ cat /tmp/tmp.8C7ecPbhbd
++ rm /tmp/tmp.weWaRX2GaN /tmp/tmp.8C7ecPbhbd
++ return 0
+ [[ '' == \t\r\u\e ]]
+ sleep 10
+ [[ true == \t\r\u\e ]]
+ set +x
Waiting for cluster readyness.................................................
+ wait_for_running some-name-cfg 3 false
+ local name=some-name-cfg
+ let last_pod=2
+ local check_cluster_readyness=false
+ set_debug
+ [[ 1 == 1 ]]
+ set -o xtrace
+ local rs_name=cfg
+ local cluster_name=some-name
++ seq 0 2
+ for i in '$(seq 0 $last_pod)'
+ [[ 0 -eq 2 ]]
+ wait_pod some-name-cfg-0
+ local pod=some-name-cfg-0
+ set +o xtrace
waiting for pod/some-name-cfg-0 to be ready.OK
+ for i in '$(seq 0 $last_pod)'
+ [[ 1 -eq 2 ]]
+ wait_pod some-name-cfg-1
+ local pod=some-name-cfg-1
+ set +o xtrace
waiting for pod/some-name-cfg-1 to be ready.OK
+ for i in '$(seq 0 $last_pod)'
+ [[ 2 -eq 2 ]]
++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].arbiter.enabled}'
+++ mktemp
++ local LAST_OUT=/tmp/tmp.HKX7FFwiKp
+++ mktemp
++ local LAST_ERR=/tmp/tmp.1x1BJSuPxP
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in '$(seq 0 2)'
++ set +e
++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].arbiter.enabled}'
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 -a -n 1 ']'
++ break
++ cat /tmp/tmp.HKX7FFwiKp
++ cat /tmp/tmp.1x1BJSuPxP
++ rm /tmp/tmp.HKX7FFwiKp /tmp/tmp.1x1BJSuPxP
++ return 0
+ [[ '' == \t\r\u\e ]]
+ wait_pod some-name-cfg-2
+ local pod=some-name-cfg-2
+ set +o xtrace
waiting for pod/some-name-cfg-2 to be ready.OK
++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].non_voting.enabled}'
+++ mktemp
++ local LAST_OUT=/tmp/tmp.vwCcEjbUfi
+++ mktemp
++ local LAST_ERR=/tmp/tmp.hj9Avl1wQv
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in '$(seq 0 2)'
++ set +e
++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].non_voting.enabled}'
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 -a -n 1 ']'
++ break
++ cat /tmp/tmp.vwCcEjbUfi
++ cat /tmp/tmp.hj9Avl1wQv
++ rm /tmp/tmp.vwCcEjbUfi /tmp/tmp.hj9Avl1wQv
++ return 0
+ [[ '' == \t\r\u\e ]]
+ sleep 10
+ [[ false == \t\r\u\e ]]
+ wait_cluster_consistency some-name
+ local cluster_name=some-name
+ local wait_time=32
+ retry=0
+ sleep 7
+ echo -n 'waiting for cluster readyness'
waiting for cluster readyness++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}'
+++ mktemp
++ local LAST_OUT=/tmp/tmp.E85yRjVXAP
+++ mktemp
++ local LAST_ERR=/tmp/tmp.UlqCLCBxnB
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in '$(seq 0 2)'
++ set +e
++ kubectl get psmdb some-name -o 'jsonpath={.status.state}'
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 -a -n 1 ']'
++ break
++ cat /tmp/tmp.E85yRjVXAP
++ cat /tmp/tmp.UlqCLCBxnB
++ rm /tmp/tmp.E85yRjVXAP /tmp/tmp.UlqCLCBxnB
++ return 0
+ [[ ready == \r\e\a\d\y ]]
+ desc 'check if statefulset created with expected config'
+ set +o xtrace
-----------------------------------------------------------------------------------
check if statefulset created with expected config
-----------------------------------------------------------------------------------
+ compare_kubectl statefulset/some-name-rs0
+ local resource=statefulset/some-name-rs0
+ local postfix=
+ local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1566/e2e-tests/pitr-sharded/compare/statefulset_some-name-rs0.yml
+ local new_result=/tmp/tmp.646agzkQVK/statefulset_some-name-rs0.yml
+ '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1566/e2e-tests/pitr-sharded/compare/statefulset_some-name-rs0-oc.yml ']'
+ kubectl_bin get -o yaml statefulset/some-name-rs0
+ yq eval '
  del(.metadata.ownerReferences[].apiVersion) |
  del(.metadata.managedFields) |
  del(.. | select(has("creationTimestamp")).creationTimestamp) |
  del(.. | select(has("namespace")).namespace) |
  del(.. | select(has("uid")).uid) |
  del(.metadata.resourceVersion) |
  del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) |
  del(.metadata.selfLink) |
  del(.metadata.annotations."cloud.google.com/neg") |
  del(.. | select(has("image")).image) |
  del(.. | select(has("clusterIP")).clusterIP) |
  del(.. | select(has("clusterIPs")).clusterIPs) |
  del(.. | select(has("dataSource")).dataSource) |
  del(.. | select(has("procMount")).procMount) |
  del(.. | select(has("storageClassName")).storageClassName) |
  del(.. | select(has("finalizers")).finalizers) |
  del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") |
  del(.. | select(has("volumeName")).volumeName) |
  del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") |
  del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") |
  del(.spec.volumeMode) |
  del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") |
  del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") |
  del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") |
  del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") |
  del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") |
  del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) |
  del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) |
  del(.. | select(has("nodePort")).nodePort) |
  del(.status) |
  (.. | select(tag == "!!str")) |= sub("pitr-sharded-26322", "NAME_SPACE") |
  del(.spec.volumeClaimTemplates[].apiVersion) |
  del(.spec.volumeClaimTemplates[].kind) |
  del(.spec.ipFamilies) |
  del(.spec.ipFamilyPolicy) |
  (.. | select(. == "extensions/v1beta1")) = "apps/v1" |
  (.. | select(. == "batch/v1beta1")) = "batch/v1" ' -
++ mktemp
+ local LAST_OUT=/tmp/tmp.jUEPexq9GY
++ mktemp
+ local LAST_ERR=/tmp/tmp.TeExmnIBqJ
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl get -o yaml statefulset/some-name-rs0
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.jUEPexq9GY
+ cat /tmp/tmp.TeExmnIBqJ
+ rm /tmp/tmp.jUEPexq9GY /tmp/tmp.TeExmnIBqJ
+ return 0
+ yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.646agzkQVK/statefulset_some-name-rs0.yml
+ version_gt 1.22
++ echo '1.26 >= 1.22'
++ bc -l
+ '[' 1 -eq 1 ']'
+ return 0
+ yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.646agzkQVK/statefulset_some-name-rs0.yml
+ yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.646agzkQVK/statefulset_some-name-rs0.yml
+ [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1566/e2e-tests/pitr-sharded/compare/statefulset_some-name-rs0.yml == */cronjob* ]]
+ diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1566/e2e-tests/pitr-sharded/compare/statefulset_some-name-rs0.yml /tmp/tmp.646agzkQVK/statefulset_some-name-rs0.yml
+ compare_kubectl statefulset/some-name-rs1
+ local resource=statefulset/some-name-rs1
+ local postfix=
+ local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1566/e2e-tests/pitr-sharded/compare/statefulset_some-name-rs1.yml
+ local new_result=/tmp/tmp.646agzkQVK/statefulset_some-name-rs1.yml
+ '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1566/e2e-tests/pitr-sharded/compare/statefulset_some-name-rs1-oc.yml ']'
+ kubectl_bin get -o yaml statefulset/some-name-rs1
+ yq eval '
  del(.metadata.ownerReferences[].apiVersion) |
  del(.metadata.managedFields) |
  del(.. | select(has("creationTimestamp")).creationTimestamp) |
  del(.. | select(has("namespace")).namespace) |
  del(.. | select(has("uid")).uid) |
  del(.metadata.resourceVersion) |
  del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) |
  del(.metadata.selfLink) |
  del(.metadata.annotations."cloud.google.com/neg") |
  del(.. | select(has("image")).image) |
  del(.. | select(has("clusterIP")).clusterIP) |
  del(.. | select(has("clusterIPs")).clusterIPs) |
  del(.. | select(has("dataSource")).dataSource) |
  del(.. | select(has("procMount")).procMount) |
  del(.. | select(has("storageClassName")).storageClassName) |
  del(.. | select(has("finalizers")).finalizers) |
  del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") |
  del(.. | select(has("volumeName")).volumeName) |
  del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") |
  del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") |
  del(.spec.volumeMode) |
  del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") |
  del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") |
  del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") |
  del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") |
  del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") |
  del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) |
  del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) |
  del(.. | select(has("nodePort")).nodePort) |
  del(.status) |
  (.. | select(tag == "!!str")) |= sub("pitr-sharded-26322", "NAME_SPACE") |
  del(.spec.volumeClaimTemplates[].apiVersion) |
  del(.spec.volumeClaimTemplates[].kind) |
  del(.spec.ipFamilies) |
  del(.spec.ipFamilyPolicy) |
  (.. | select(. == "extensions/v1beta1")) = "apps/v1" |
  (.. | select(. == "batch/v1beta1")) = "batch/v1" ' -
++ mktemp
+ local LAST_OUT=/tmp/tmp.knw3yBHXzt
++ mktemp
+ local LAST_ERR=/tmp/tmp.od7Mrc2FQL
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl get -o yaml statefulset/some-name-rs1
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.knw3yBHXzt
+ cat /tmp/tmp.od7Mrc2FQL
+ rm /tmp/tmp.knw3yBHXzt /tmp/tmp.od7Mrc2FQL
+ return 0
+ yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.646agzkQVK/statefulset_some-name-rs1.yml
+ version_gt 1.22
++ echo '1.26 >= 1.22'
++ bc -l
+ '[' 1 -eq 1 ']'
+ return 0
+ yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.646agzkQVK/statefulset_some-name-rs1.yml
+ yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.646agzkQVK/statefulset_some-name-rs1.yml
+ [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1566/e2e-tests/pitr-sharded/compare/statefulset_some-name-rs1.yml == */cronjob* ]]
+ diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1566/e2e-tests/pitr-sharded/compare/statefulset_some-name-rs1.yml /tmp/tmp.646agzkQVK/statefulset_some-name-rs1.yml
+ compare_kubectl statefulset/some-name-rs2
+ local resource=statefulset/some-name-rs2
+ local postfix=
+ local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1566/e2e-tests/pitr-sharded/compare/statefulset_some-name-rs2.yml
+ local new_result=/tmp/tmp.646agzkQVK/statefulset_some-name-rs2.yml
+ '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1566/e2e-tests/pitr-sharded/compare/statefulset_some-name-rs2-oc.yml ']'
+ kubectl_bin get -o yaml statefulset/some-name-rs2
+ yq eval '
  del(.metadata.ownerReferences[].apiVersion) |
  del(.metadata.managedFields) |
  del(.. | select(has("creationTimestamp")).creationTimestamp) |
  del(.. | select(has("namespace")).namespace) |
  del(.. | select(has("uid")).uid) |
  del(.metadata.resourceVersion) |
  del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) |
  del(.metadata.selfLink) |
  del(.metadata.annotations."cloud.google.com/neg") |
  del(.. | select(has("image")).image) |
  del(.. | select(has("clusterIP")).clusterIP) |
  del(.. | select(has("clusterIPs")).clusterIPs) |
  del(.. | select(has("dataSource")).dataSource) |
  del(.. | select(has("procMount")).procMount) |
  del(.. | select(has("storageClassName")).storageClassName) |
  del(.. | select(has("finalizers")).finalizers) |
  del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") |
  del(.. | select(has("volumeName")).volumeName) |
  del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") |
  del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") |
  del(.spec.volumeMode) |
  del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") |
  del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") |
  del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") |
  del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") |
  del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") |
  del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) |
  del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) |
  del(.. | select(has("nodePort")).nodePort) |
  del(.status) |
  (.. | select(tag == "!!str")) |= sub("pitr-sharded-26322", "NAME_SPACE") |
  del(.spec.volumeClaimTemplates[].apiVersion) |
  del(.spec.volumeClaimTemplates[].kind) |
  del(.spec.ipFamilies) |
  del(.spec.ipFamilyPolicy) |
  (.. | select(. == "extensions/v1beta1")) = "apps/v1" |
  (.. | select(. == "batch/v1beta1")) = "batch/v1" ' -
++ mktemp
+ local LAST_OUT=/tmp/tmp.SbxaArkKWv
++ mktemp
+ local LAST_ERR=/tmp/tmp.QsX1N9XQI0
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl get -o yaml statefulset/some-name-rs2
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.SbxaArkKWv
+ cat /tmp/tmp.QsX1N9XQI0
+ rm /tmp/tmp.SbxaArkKWv /tmp/tmp.QsX1N9XQI0
+ return 0
+ yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.646agzkQVK/statefulset_some-name-rs2.yml
+ version_gt 1.22
++ echo '1.26 >= 1.22'
++ bc -l
+ '[' 1 -eq 1 ']'
+ return 0
+ yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.646agzkQVK/statefulset_some-name-rs2.yml
+ yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.646agzkQVK/statefulset_some-name-rs2.yml
+ [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1566/e2e-tests/pitr-sharded/compare/statefulset_some-name-rs2.yml == */cronjob* ]]
+ diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1566/e2e-tests/pitr-sharded/compare/statefulset_some-name-rs2.yml /tmp/tmp.646agzkQVK/statefulset_some-name-rs2.yml
+ compare_kubectl statefulset/some-name-cfg
+ local resource=statefulset/some-name-cfg
+ local postfix=
+ local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1566/e2e-tests/pitr-sharded/compare/statefulset_some-name-cfg.yml
+ local new_result=/tmp/tmp.646agzkQVK/statefulset_some-name-cfg.yml
+ '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1566/e2e-tests/pitr-sharded/compare/statefulset_some-name-cfg-oc.yml ']'
+ kubectl_bin get -o yaml statefulset/some-name-cfg
+ yq eval '
  del(.metadata.ownerReferences[].apiVersion) |
  del(.metadata.managedFields) |
  del(.. | select(has("creationTimestamp")).creationTimestamp) |
  del(.. | select(has("namespace")).namespace) |
  del(.. | select(has("uid")).uid) |
  del(.metadata.resourceVersion) |
  del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) |
  del(.metadata.selfLink) |
  del(.metadata.annotations."cloud.google.com/neg") |
  del(.. | select(has("image")).image) |
  del(.. | select(has("clusterIP")).clusterIP) |
  del(.. | select(has("clusterIPs")).clusterIPs) |
  del(.. | select(has("dataSource")).dataSource) |
  del(.. | select(has("procMount")).procMount) |
  del(.. | select(has("storageClassName")).storageClassName) |
  del(.. | select(has("finalizers")).finalizers) |
  del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") |
  del(.. | select(has("volumeName")).volumeName) |
  del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") |
  del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") |
  del(.spec.volumeMode) |
  del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") |
  del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") |
  del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") |
  del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") |
  del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") |
  del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) |
  del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) |
  del(.. | select(has("nodePort")).nodePort) |
  del(.status) |
  (.. | select(tag == "!!str")) |= sub("pitr-sharded-26322", "NAME_SPACE") |
  del(.spec.volumeClaimTemplates[].apiVersion) |
  del(.spec.volumeClaimTemplates[].kind) |
  del(.spec.ipFamilies) |
  del(.spec.ipFamilyPolicy) |
  (.. | select(. == "extensions/v1beta1")) = "apps/v1" |
  (.. | select(. == "batch/v1beta1")) = "batch/v1" ' -
++ mktemp
+ local LAST_OUT=/tmp/tmp.BjzWzmGBw5
++ mktemp
+ local LAST_ERR=/tmp/tmp.pzyfTCfV1o
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl get -o yaml statefulset/some-name-cfg
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.BjzWzmGBw5
+ cat /tmp/tmp.pzyfTCfV1o
+ rm /tmp/tmp.BjzWzmGBw5 /tmp/tmp.pzyfTCfV1o
+ return 0
+ yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.646agzkQVK/statefulset_some-name-cfg.yml
+ version_gt 1.22
++ echo '1.26 >= 1.22'
++ bc -l
+ '[' 1 -eq 1 ']'
+ return 0
+ yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.646agzkQVK/statefulset_some-name-cfg.yml
+ yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.646agzkQVK/statefulset_some-name-cfg.yml
+ [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1566/e2e-tests/pitr-sharded/compare/statefulset_some-name-cfg.yml == */cronjob* ]]
+ diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1566/e2e-tests/pitr-sharded/compare/statefulset_some-name-cfg.yml /tmp/tmp.646agzkQVK/statefulset_some-name-cfg.yml
+ compare_kubectl statefulset/some-name-mongos ''
+ local resource=statefulset/some-name-mongos
+ local postfix=
+ local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1566/e2e-tests/pitr-sharded/compare/statefulset_some-name-mongos.yml
+ local new_result=/tmp/tmp.646agzkQVK/statefulset_some-name-mongos.yml
+ '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1566/e2e-tests/pitr-sharded/compare/statefulset_some-name-mongos-oc.yml ']'
+ kubectl_bin get -o yaml statefulset/some-name-mongos
+ yq eval '
  del(.metadata.ownerReferences[].apiVersion) |
  del(.metadata.managedFields) |
  del(.. | select(has("creationTimestamp")).creationTimestamp) |
  del(.. | select(has("namespace")).namespace) |
  del(.. | select(has("uid")).uid) |
  del(.metadata.resourceVersion) |
  del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) |
  del(.metadata.selfLink) |
  del(.metadata.annotations."cloud.google.com/neg") |
  del(.. | select(has("image")).image) |
  del(.. | select(has("clusterIP")).clusterIP) |
  del(.. | select(has("clusterIPs")).clusterIPs) |
  del(.. | select(has("dataSource")).dataSource) |
  del(.. | select(has("procMount")).procMount) |
  del(.. | select(has("storageClassName")).storageClassName) |
  del(.. | select(has("finalizers")).finalizers) |
  del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") |
  del(.. | select(has("volumeName")).volumeName) |
  del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") |
  del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") |
  del(.spec.volumeMode) |
  del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") |
  del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") |
  del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") |
  del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") |
  del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") |
  del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) |
  del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) |
  del(.. | select(has("nodePort")).nodePort) |
  del(.status) |
  (.. | select(tag == "!!str")) |= sub("pitr-sharded-26322", "NAME_SPACE") |
  del(.spec.volumeClaimTemplates[].apiVersion) |
  del(.spec.volumeClaimTemplates[].kind) |
  del(.spec.ipFamilies) |
  del(.spec.ipFamilyPolicy) |
  (.. | select(. == "extensions/v1beta1")) = "apps/v1" |
  (.. | select(. == "batch/v1beta1")) = "batch/v1" ' -
++ mktemp
+ local LAST_OUT=/tmp/tmp.TYxKtjk3cC
++ mktemp
+ local LAST_ERR=/tmp/tmp.hQEQCOZh7O
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl get -o yaml statefulset/some-name-mongos
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.TYxKtjk3cC
+ cat /tmp/tmp.hQEQCOZh7O
+ rm /tmp/tmp.TYxKtjk3cC /tmp/tmp.hQEQCOZh7O
+ return 0
+ yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.646agzkQVK/statefulset_some-name-mongos.yml
+ version_gt 1.22
++ echo '1.26 >= 1.22'
++ bc -l
+ '[' 1 -eq 1 ']'
+ return 0
+ yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.646agzkQVK/statefulset_some-name-mongos.yml
+ yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.646agzkQVK/statefulset_some-name-mongos.yml
+ [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1566/e2e-tests/pitr-sharded/compare/statefulset_some-name-mongos.yml == */cronjob* ]]
+ diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1566/e2e-tests/pitr-sharded/compare/statefulset_some-name-mongos.yml /tmp/tmp.646agzkQVK/statefulset_some-name-mongos.yml
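
Each compare_kubectl call above follows the same flow: dump the live object, strip volatile fields (uid, resourceVersion, images, percona.com/* hashes), rewrite the namespace to NAME_SPACE, then diff against the golden file. A condensed sketch, with the long yq filter abbreviated as $NORMALIZE_FILTER and the scratch directory as $tmp_dir (both assumed names):

  # Sketch of the normalize-then-diff flow; not verbatim repo code.
  compare_kubectl() {
      local resource=$1
      local name=$(echo "$resource" | tr '/' '_')
      local expected="e2e-tests/pitr-sharded/compare/${name}.yml"
      local actual="${tmp_dir}/${name}.yml"
      kubectl get -o yaml "$resource" | yq eval "$NORMALIZE_FILTER" - >"$actual"
      # Fields that only exist on newer Kubernetes are stripped too.
      yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' "$actual"
      yq -i eval 'del(.spec.internalTrafficPolicy)' "$actual"
      yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' "$actual"
      diff -u "$expected" "$actual"
  }
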
'\''db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})\n'\'' | mongo mongodb://userAdmin:userAdmin123456@some-name-mongos.pitr-sharded-26322.svc.cluster.local/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.ubLJscbrmD ++ mktemp + local LAST_ERR=/tmp/tmp.ml6Ju7dZGL + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-7469665986-8krmt -- bash -c 'printf '\''db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})\n'\'' | mongo mongodb://userAdmin:userAdmin123456@some-name-mongos.pitr-sharded-26322.svc.cluster.local/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.ubLJscbrmD Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-mongos.pitr-sharded-26322.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb Implicit session: session { "id" : UUID("2f9b1213-5d11-420c-8a30-c8c1e5d146a1") } Percona Server for MongoDB server version: v7.0.11-6 WARNING: shell and server versions do not match Successfully added user: { "user" : "myApp", "roles" : [ { "db" : "myApp", "role" : "readWrite" } ] } bye + cat /tmp/tmp.ml6Ju7dZGL + rm /tmp/tmp.ubLJscbrmD /tmp/tmp.ml6Ju7dZGL + return 0 + sleep 2 + write_document + local cmp_postfix= + desc 'write initial data, read from all' + set +o xtrace ----------------------------------------------------------------------------------- write initial data, read from all ----------------------------------------------------------------------------------- + run_mongos 'use myApp\n db.test.insert({ x: 100500 })' myApp:myPass@some-name-mongos.pitr-sharded-26322 + local 'command=use myApp\n db.test.insert({ x: 100500 })' + local uri=myApp:myPass@some-name-mongos.pitr-sharded-26322 + local driver=mongodb + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.NhfPbUbu5B +++ mktemp ++ local LAST_ERR=/tmp/tmp.VJKpsd6EmG ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.NhfPbUbu5B ++ cat /tmp/tmp.VJKpsd6EmG ++ rm /tmp/tmp.NhfPbUbu5B /tmp/tmp.VJKpsd6EmG ++ return 0 + local client_container=psmdb-client-7469665986-8krmt + local mongo_flag= + kubectl_bin exec psmdb-client-7469665986-8krmt -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.pitr-sharded-26322.svc.cluster.local/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.5KurVt3821 ++ mktemp + local LAST_ERR=/tmp/tmp.G5K7bphH2T + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-7469665986-8krmt -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.pitr-sharded-26322.svc.cluster.local/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.5KurVt3821 Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-mongos.pitr-sharded-26322.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb Implicit session: session { "id" : UUID("4dbacfe2-a897-4061-8ffd-acabc0cc5db6") } Percona Server for MongoDB server version: v7.0.11-6 WARNING: shell and server versions do not match 
switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.G5K7bphH2T + rm /tmp/tmp.5KurVt3821 /tmp/tmp.G5K7bphH2T + return 0 + minikube_sleep + sleep_time=10 + [[ '' == 1 ]] + compare_mongos_cmd find myApp:myPass@some-name-mongos.pitr-sharded-26322 + local command=find + local uri=myApp:myPass@some-name-mongos.pitr-sharded-26322 + local postfix= + local suffix= + local database=myApp + local collection=test + run_mongos 'use myApp\n db.test.find()' myApp:myPass@some-name-mongos.pitr-sharded-26322 mongodb '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-mongos.pitr-sharded-26322 + local driver=mongodb + local suffix=.svc.cluster.local + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' + egrep -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Z4aZRZQflk +++ mktemp ++ local LAST_ERR=/tmp/tmp.5sNDQCzR4U ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Z4aZRZQflk ++ cat /tmp/tmp.5sNDQCzR4U ++ rm /tmp/tmp.Z4aZRZQflk /tmp/tmp.5sNDQCzR4U ++ return 0 + local client_container=psmdb-client-7469665986-8krmt + local mongo_flag= + kubectl_bin exec psmdb-client-7469665986-8krmt -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.pitr-sharded-26322.svc.cluster.local/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.4DuDye1R07 ++ mktemp + local LAST_ERR=/tmp/tmp.dlo08jRQLt + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-7469665986-8krmt -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.pitr-sharded-26322.svc.cluster.local/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.4DuDye1R07 + cat /tmp/tmp.dlo08jRQLt + rm /tmp/tmp.4DuDye1R07 /tmp/tmp.dlo08jRQLt + return 0 + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1566/e2e-tests/pitr-sharded/compare/find.json /tmp/tmp.646agzkQVK/find + wait_backup_agent some-name-rs0-0 + local agent_pod=some-name-rs0-0 + set +o xtrace some-name-rs0-0 + wait_backup_agent some-name-rs0-1 + local agent_pod=some-name-rs0-1 + set +o xtrace some-name-rs0-1 + wait_backup_agent some-name-rs0-2 + local agent_pod=some-name-rs0-2 + set +o xtrace some-name-rs0-2 + wait_backup_agent some-name-rs1-0 + local agent_pod=some-name-rs1-0 + set +o xtrace some-name-rs1-0 + wait_backup_agent some-name-rs1-1 + local agent_pod=some-name-rs1-1 + set +o xtrace some-name-rs1-1 + wait_backup_agent some-name-rs1-2 + local agent_pod=some-name-rs1-2 + set +o xtrace some-name-rs1-2 + wait_backup_agent some-name-rs2-0 + local agent_pod=some-name-rs2-0 + set +o xtrace some-name-rs2-0 + wait_backup_agent some-name-rs2-1 + local agent_pod=some-name-rs2-1 + set +o xtrace some-name-rs2-1 + wait_backup_agent some-name-rs2-2 + local agent_pod=some-name-rs2-2 + set +o xtrace some-name-rs2-2 + local backup_name + '[' 0 -eq 0 ']' + backup_name=backup-minio + run_backup backup-minio 0 + local name=backup-minio + local idx=0 + desc 'run backup backup-minio-0' + set +o xtrace 
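
# run_mongos, used above for both the createUser call and the first insert, boils
# down to exec-ing the mongo shell inside the client pod and piping the statement
# in via printf; the equivalent one-liner, with pod name and URI taken from the
# trace:
kubectl exec psmdb-client-7469665986-8krmt -- bash -c \
  'printf "use myApp\n db.test.insert({ x: 100500 })\n" | mongo "mongodb://myApp:myPass@some-name-mongos.pitr-sharded-26322.svc.cluster.local/admin"'
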
----------------------------------------------------------------------------------- run backup backup-minio-0 ----------------------------------------------------------------------------------- + /usr/bin/sed -e 's/name:/name: backup-minio-0/' + kubectl_bin apply -f - + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1566/e2e-tests/pitr-sharded/conf/backup-minio.yml ++ mktemp + local LAST_OUT=/tmp/tmp.pbxIfgLtQB ++ mktemp + local LAST_ERR=/tmp/tmp.eYxkFZeqcG + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.pbxIfgLtQB perconaservermongodbbackup.psmdb.percona.com/backup-minio-0 created + cat /tmp/tmp.eYxkFZeqcG + rm /tmp/tmp.pbxIfgLtQB /tmp/tmp.eYxkFZeqcG + return 0 + wait_backup backup-minio-0 + local backup_name=backup-minio-0 + set +o xtrace backup-minio-0.................. + sleep 5 + compare_latest_restorable_time some-name-rs0 backup-minio-0 + local cluster=some-name-rs0 + local backup_name=backup-minio-0 + local latest_restorable_time + local backup_time ++ get_latest_restorable_time some-name-rs0 ++ local cluster=some-name-rs0 ++ local first_timestamp ++ local second_timestamp ++ local retry=0 ++ [[ '' != '' ]] +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json ++++ mktemp +++ local LAST_OUT=/tmp/tmp.rmjRU1gCHJ ++++ mktemp +++ local LAST_ERR=/tmp/tmp.ehvprh0oZq +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.rmjRU1gCHJ +++ cat /tmp/tmp.ehvprh0oZq +++ rm /tmp/tmp.rmjRU1gCHJ /tmp/tmp.ehvprh0oZq +++ return 0 ++ first_timestamp=null ++ sleep 5 ++ [[ null != '' ]] ++ [[ null != \n\u\l\l ]] ++ let retry+=1 ++ [[ 1 -gt 30 ]] ++ [[ null != '' ]] ++ [[ null != \n\u\l\l ]] +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.4uHai4MtU6 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.pdVU3TqX3k +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.4uHai4MtU6 +++ cat /tmp/tmp.pdVU3TqX3k +++ rm /tmp/tmp.4uHai4MtU6 /tmp/tmp.pdVU3TqX3k +++ return 0 ++ first_timestamp=null ++ sleep 5 ++ [[ null != '' ]] ++ [[ null != \n\u\l\l ]] ++ let retry+=1 ++ [[ 2 -gt 30 ]] ++ [[ null != '' ]] ++ [[ null != \n\u\l\l ]] +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.lZ73dusLC3 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.mWD15RGzQN +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.lZ73dusLC3 +++ cat /tmp/tmp.mWD15RGzQN +++ rm /tmp/tmp.lZ73dusLC3 /tmp/tmp.mWD15RGzQN +++ return 0 ++ first_timestamp=1718116480 ++ sleep 5 ++ [[ 1718116480 != '' ]] ++ [[ 1718116480 != \n\u\l\l ]] +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq 
'.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.fN2NfcRQsA ++++ mktemp +++ local LAST_ERR=/tmp/tmp.d1pTzLOy9G +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.fN2NfcRQsA +++ cat /tmp/tmp.d1pTzLOy9G +++ rm /tmp/tmp.fN2NfcRQsA /tmp/tmp.d1pTzLOy9G +++ return 0 ++ second_timestamp=1718116480 ++ let retry+=1 ++ [[ 3 -gt 30 ]] ++ [[ 1718116480 != '' ]] ++ [[ 1718116480 != \n\u\l\l ]] ++ [[ 1718116480 == 1718116480 ]] ++ /usr/bin/date -u -d @1718116480 +%Y-%m-%dT%H:%M:%SZ + latest_restorable_time=2024-06-11T14:34:40Z ++ get_latest_restorable_time_from_backup_object backup-minio-0 ++ local backup_name=backup-minio-0 ++ local latestRestorableTime ++ local retry=0 ++ [[ '' != '' ]] ++ sleep 5 +++ kubectl_bin get psmdb-backup backup-minio-0 -o 'jsonpath={.status.latestRestorableTime}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.WWyZJwluzA ++++ mktemp +++ local LAST_ERR=/tmp/tmp.qO7CB6cKx7 +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get psmdb-backup backup-minio-0 -o 'jsonpath={.status.latestRestorableTime}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.WWyZJwluzA +++ cat /tmp/tmp.qO7CB6cKx7 +++ rm /tmp/tmp.WWyZJwluzA /tmp/tmp.qO7CB6cKx7 +++ return 0 ++ latestRestorableTime=2024-06-11T14:34:40Z ++ let retry+=1 ++ [[ 1 -gt 30 ]] ++ [[ 2024-06-11T14:34:40Z != '' ]] ++ [[ 2024-06-11T14:34:40Z != \n\u\l\l ]] ++ echo 2024-06-11T14:34:40Z + backup_time=2024-06-11T14:34:40Z + [[ 2024-06-11T14:34:40Z != \2\0\2\4\-\0\6\-\1\1\T\1\4\:\3\4\:\4\0\Z ]] + write_document -2nd + local cmp_postfix=-2nd + desc 'write initial data, read from all' + set +o xtrace ----------------------------------------------------------------------------------- write initial data, read from all ----------------------------------------------------------------------------------- + run_mongos 'use myApp\n db.test.insert({ x: 100500 })' myApp:myPass@some-name-mongos.pitr-sharded-26322 + local 'command=use myApp\n db.test.insert({ x: 100500 })' + local uri=myApp:myPass@some-name-mongos.pitr-sharded-26322 + local driver=mongodb + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.BnbWPVreQB +++ mktemp ++ local LAST_ERR=/tmp/tmp.rw8uNNCYcK ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.BnbWPVreQB ++ cat /tmp/tmp.rw8uNNCYcK ++ rm /tmp/tmp.BnbWPVreQB /tmp/tmp.rw8uNNCYcK ++ return 0 + local client_container=psmdb-client-7469665986-8krmt + local mongo_flag= + kubectl_bin exec psmdb-client-7469665986-8krmt -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.pitr-sharded-26322.svc.cluster.local/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.0ihvkRg9bF ++ mktemp + local LAST_ERR=/tmp/tmp.UdqbtbiQ84 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-7469665986-8krmt -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo 
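
# compare_latest_restorable_time, above, settles on a PITR timestamp by repeating
# this probe until it returns the same non-null epoch twice (at most 30 retries,
# 5s apart), then renders it as ISO 8601 for comparison with the backup object's
# status.latestRestorableTime:
kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json \
  | jq '.backups.pitrChunks.pitrChunks | last | .range.end'
date -u -d @1718116480 +%Y-%m-%dT%H:%M:%SZ   # -> 2024-06-11T14:34:40Z
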
mongodb://myApp:myPass@some-name-mongos.pitr-sharded-26322.svc.cluster.local/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.0ihvkRg9bF Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-mongos.pitr-sharded-26322.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb Implicit session: session { "id" : UUID("8e610586-99f3-4934-9f05-2a7210423989") } Percona Server for MongoDB server version: v7.0.11-6 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.UdqbtbiQ84 + rm /tmp/tmp.0ihvkRg9bF /tmp/tmp.UdqbtbiQ84 + return 0 + minikube_sleep + sleep_time=10 + [[ '' == 1 ]] + compare_mongos_cmd find myApp:myPass@some-name-mongos.pitr-sharded-26322 -2nd + local command=find + local uri=myApp:myPass@some-name-mongos.pitr-sharded-26322 + local postfix=-2nd + local suffix= + local database=myApp + local collection=test + run_mongos 'use myApp\n db.test.find()' myApp:myPass@some-name-mongos.pitr-sharded-26322 mongodb '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-mongos.pitr-sharded-26322 + local driver=mongodb + local suffix=.svc.cluster.local + egrep -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.8L5Nk3eSE7 +++ mktemp ++ local LAST_ERR=/tmp/tmp.gYupSfuhbn ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.8L5Nk3eSE7 ++ cat /tmp/tmp.gYupSfuhbn ++ rm /tmp/tmp.8L5Nk3eSE7 /tmp/tmp.gYupSfuhbn ++ return 0 + local client_container=psmdb-client-7469665986-8krmt + local mongo_flag= + kubectl_bin exec psmdb-client-7469665986-8krmt -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.pitr-sharded-26322.svc.cluster.local/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.IgpJOM27aa ++ mktemp + local LAST_ERR=/tmp/tmp.XDcbNTMXeN + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-7469665986-8krmt -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.pitr-sharded-26322.svc.cluster.local/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.IgpJOM27aa + cat /tmp/tmp.XDcbNTMXeN + rm /tmp/tmp.IgpJOM27aa /tmp/tmp.XDcbNTMXeN + return 0 + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1566/e2e-tests/pitr-sharded/compare/find-2nd.json /tmp/tmp.646agzkQVK/find-2nd + sleep 2 ++ grep -E -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' ++ run_mongos 'new Date().toISOString()' myApp:myPass@some-name-mongos.pitr-sharded-26322 mongodb '' --quiet ++ cut -c1-19 ++ local 'command=new Date().toISOString()' ++ local uri=myApp:myPass@some-name-mongos.pitr-sharded-26322 ++ local driver=mongodb ++ tr T ' ' ++ local suffix=.svc.cluster.local +++ kubectl_bin get pods 
--selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.l6f1vjJjFE ++++ mktemp +++ local LAST_ERR=/tmp/tmp.vwdI0SKOWK +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.l6f1vjJjFE +++ cat /tmp/tmp.vwdI0SKOWK +++ rm /tmp/tmp.l6f1vjJjFE /tmp/tmp.vwdI0SKOWK +++ return 0 ++ local client_container=psmdb-client-7469665986-8krmt ++ local mongo_flag=--quiet ++ kubectl_bin exec psmdb-client-7469665986-8krmt -- bash -c 'printf '\''new Date().toISOString()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.pitr-sharded-26322.svc.cluster.local/admin --quiet' +++ mktemp ++ local LAST_OUT=/tmp/tmp.VZNmNtW253 +++ mktemp ++ local LAST_ERR=/tmp/tmp.sklHDLQpGo ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl exec psmdb-client-7469665986-8krmt -- bash -c 'printf '\''new Date().toISOString()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.pitr-sharded-26322.svc.cluster.local/admin --quiet' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.VZNmNtW253 ++ cat /tmp/tmp.sklHDLQpGo ++ rm /tmp/tmp.VZNmNtW253 /tmp/tmp.sklHDLQpGo ++ return 0 + time_now='2024-06-11 14:35:11' + check_recovery backup-minio-0 date '2024-06-11 14:35:11' -2nd some-name + local backup_name=backup-minio-0 + local restore_type=date + local 'restore_date=2024-06-11 14:35:11' + local cmp_postfix=-2nd + local cluster_name=some-name + desc 'write more data before restore by date' + set +o xtrace ----------------------------------------------------------------------------------- write more data before restore by date ----------------------------------------------------------------------------------- + sleep 60 + run_mongos 'use myApp\n db.test.insert({ x: 100501 })' myApp:myPass@some-name-mongos.pitr-sharded-26322 + local 'command=use myApp\n db.test.insert({ x: 100501 })' + local uri=myApp:myPass@some-name-mongos.pitr-sharded-26322 + local driver=mongodb + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.BYWXZV7hRx +++ mktemp ++ local LAST_ERR=/tmp/tmp.WzVVioJrfb ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.BYWXZV7hRx ++ cat /tmp/tmp.WzVVioJrfb ++ rm /tmp/tmp.BYWXZV7hRx /tmp/tmp.WzVVioJrfb ++ return 0 + local client_container=psmdb-client-7469665986-8krmt + local mongo_flag= + kubectl_bin exec psmdb-client-7469665986-8krmt -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100501 })\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.pitr-sharded-26322.svc.cluster.local/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.Nnf0spI7cX ++ mktemp + local LAST_ERR=/tmp/tmp.SnXOhDGFtH + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-7469665986-8krmt -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100501 })\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.pitr-sharded-26322.svc.cluster.local/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Nnf0spI7cX Percona Server for MongoDB 
shell version v4.4.29-28 connecting to: mongodb://some-name-mongos.pitr-sharded-26322.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb Implicit session: session { "id" : UUID("87bf5176-8a0c-4893-b7e0-b0b2fef47703") } Percona Server for MongoDB server version: v7.0.11-6 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.SnXOhDGFtH + rm /tmp/tmp.Nnf0spI7cX /tmp/tmp.SnXOhDGFtH + return 0 + desc 'waiting for chunks to be uploaded' + set +o xtrace ----------------------------------------------------------------------------------- waiting for chunks to be uploaded ----------------------------------------------------------------------------------- + sleep 120 + desc 'check restore by date' + set +o xtrace ----------------------------------------------------------------------------------- check restore by date ----------------------------------------------------------------------------------- + '[' -z '2024-06-11 14:35:11' ']' + /usr/bin/sed -e 's/date:/date: 2024-06-11 14:35:11/' + /usr/bin/sed -e 's/type:/type: date/' + /usr/bin/sed -e 's/backupName:/backupName: backup-minio-0/' + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1566/e2e-tests/pitr-sharded/conf/restore.yml + /usr/bin/sed -e 's/name:/name: restore-backup-minio-0/' + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.BSdwCuW4SU ++ mktemp + local LAST_ERR=/tmp/tmp.10WMZnnET3 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.BSdwCuW4SU perconaservermongodbrestore.psmdb.percona.com/restore-backup-minio-0 created + cat /tmp/tmp.10WMZnnET3 + rm /tmp/tmp.BSdwCuW4SU /tmp/tmp.10WMZnnET3 + return 0 + wait_restore backup-minio-0 some-name ready 1 + local backup_name=backup-minio-0 + local cluster_name=some-name + local target_state=ready + local wait_cluster_consistency=1 + local wait_time=780 + set +o xtrace waiting psmdb-restore/backup-minio-0 to reach ready state.................. + '[' 1 -eq 1 ']' + wait_cluster_consistency some-name + local cluster_name=some-name + local wait_time=32 + retry=0 + sleep 7 + echo -n 'waiting for cluster readyness' waiting for cluster readyness++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.AS4XGnToSS +++ mktemp ++ local LAST_ERR=/tmp/tmp.2xFudi30H1 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.AS4XGnToSS ++ cat /tmp/tmp.2xFudi30H1 ++ rm /tmp/tmp.AS4XGnToSS /tmp/tmp.2xFudi30H1 ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 1 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.jiqVT85oxp +++ mktemp ++ local LAST_ERR=/tmp/tmp.eMRO7LconN ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.jiqVT85oxp ++ cat /tmp/tmp.eMRO7LconN ++ rm /tmp/tmp.jiqVT85oxp /tmp/tmp.eMRO7LconN ++ return 0 + [[ error == \r\e\a\d\y ]] + let retry+=1 + '[' 2 -ge 32 ']' + echo -n . 
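
# The perconaservermongodbrestore manifest applied above is produced by plain sed
# templating over a shared template; reconstructed from the trace (repository
# paths shortened):
cat e2e-tests/pitr-sharded/conf/restore.yml \
  | sed -e 's/name:/name: restore-backup-minio-0/' \
  | sed -e 's/backupName:/backupName: backup-minio-0/' \
  | sed -e 's/type:/type: date/' \
  | sed -e 's/date:/date: 2024-06-11 14:35:11/' \
  | kubectl apply -f -
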
.+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.LqOLdWmDQO +++ mktemp ++ local LAST_ERR=/tmp/tmp.ql9zqVrO9c ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.LqOLdWmDQO ++ cat /tmp/tmp.ql9zqVrO9c ++ rm /tmp/tmp.LqOLdWmDQO /tmp/tmp.ql9zqVrO9c ++ return 0 + [[ error == \r\e\a\d\y ]] + let retry+=1 + '[' 3 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.7uTgtKuNLH +++ mktemp ++ local LAST_ERR=/tmp/tmp.pp8u5N8l32 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.7uTgtKuNLH ++ cat /tmp/tmp.pp8u5N8l32 ++ rm /tmp/tmp.7uTgtKuNLH /tmp/tmp.pp8u5N8l32 ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 4 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.BG0I8rH5xG +++ mktemp ++ local LAST_ERR=/tmp/tmp.qdU5qDd7Q8 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.BG0I8rH5xG ++ cat /tmp/tmp.qdU5qDd7Q8 ++ rm /tmp/tmp.BG0I8rH5xG /tmp/tmp.qdU5qDd7Q8 ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 5 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.MMM0bK10k7 +++ mktemp ++ local LAST_ERR=/tmp/tmp.m1c5w3Wd3e ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.MMM0bK10k7 ++ cat /tmp/tmp.m1c5w3Wd3e ++ rm /tmp/tmp.MMM0bK10k7 /tmp/tmp.m1c5w3Wd3e ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 6 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.X71USb2afg +++ mktemp ++ local LAST_ERR=/tmp/tmp.ckWaLpBSfB ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.X71USb2afg ++ cat /tmp/tmp.ckWaLpBSfB ++ rm /tmp/tmp.X71USb2afg /tmp/tmp.ckWaLpBSfB ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 7 -ge 32 ']' + echo -n . 
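
# The dotted wait printed above is wait_cluster_consistency polling the custom
# resource state; the loop as reconstructed from the trace (32 attempts, 10s
# apart, states observed here: initializing -> error -> initializing -> ready):
retry=0
while [ "$(kubectl get psmdb some-name -o jsonpath='{.status.state}')" != "ready" ]; do
  retry=$((retry + 1))
  [ "$retry" -ge 32 ] && { echo "cluster never became ready"; exit 1; }
  echo -n .
  sleep 10
done
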
.+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.TwSUGdsJhz +++ mktemp ++ local LAST_ERR=/tmp/tmp.xjxcQcBdFQ ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.TwSUGdsJhz ++ cat /tmp/tmp.xjxcQcBdFQ ++ rm /tmp/tmp.TwSUGdsJhz /tmp/tmp.xjxcQcBdFQ ++ return 0 + [[ ready == \r\e\a\d\y ]] + echo + set -o xtrace + wait_for_running some-name-mongos 3 + local name=some-name-mongos + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=mongos + local cluster_name=some-name ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + [[ 0 -eq 2 ]] + wait_pod some-name-mongos-0 + local pod=some-name-mongos-0 + set +o xtrace waiting for pod/some-name-mongos-0 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 1 -eq 2 ]] + wait_pod some-name-mongos-1 + local pod=some-name-mongos-1 + set +o xtrace waiting for pod/some-name-mongos-1 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.OsOQlwaLrV +++ mktemp ++ local LAST_ERR=/tmp/tmp.iWgfNRnwFU ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.OsOQlwaLrV ++ cat /tmp/tmp.iWgfNRnwFU ++ rm /tmp/tmp.OsOQlwaLrV /tmp/tmp.iWgfNRnwFU ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-mongos-2 + local pod=some-name-mongos-2 + set +o xtrace waiting for pod/some-name-mongos-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].non_voting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ORi3KCyZGd +++ mktemp ++ local LAST_ERR=/tmp/tmp.9xn88P8Ati ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].non_voting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.ORi3KCyZGd ++ cat /tmp/tmp.9xn88P8Ati ++ rm /tmp/tmp.ORi3KCyZGd /tmp/tmp.9xn88P8Ati ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness + sleep 10 + compare_mongos_cmd find myApp:myPass@some-name-mongos.pitr-sharded-26322 -2nd + local command=find + local uri=myApp:myPass@some-name-mongos.pitr-sharded-26322 + local postfix=-2nd + local suffix= + local database=myApp + local collection=test + run_mongos 'use myApp\n db.test.find()' myApp:myPass@some-name-mongos.pitr-sharded-26322 mongodb '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-mongos.pitr-sharded-26322 + local driver=mongodb + local suffix=.svc.cluster.local + egrep -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.DkXamk1uro +++ mktemp ++ local LAST_ERR=/tmp/tmp.eOBq5TX4Yp ++ 
local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.DkXamk1uro ++ cat /tmp/tmp.eOBq5TX4Yp ++ rm /tmp/tmp.DkXamk1uro /tmp/tmp.eOBq5TX4Yp ++ return 0 + local client_container=psmdb-client-7469665986-8krmt + local mongo_flag= + kubectl_bin exec psmdb-client-7469665986-8krmt -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.pitr-sharded-26322.svc.cluster.local/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.xH9xVa3W7U ++ mktemp + local LAST_ERR=/tmp/tmp.yZ4FOUvfRM + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-7469665986-8krmt -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.pitr-sharded-26322.svc.cluster.local/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.xH9xVa3W7U + cat /tmp/tmp.yZ4FOUvfRM + rm /tmp/tmp.xH9xVa3W7U /tmp/tmp.yZ4FOUvfRM + return 0 + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1566/e2e-tests/pitr-sharded/compare/find-2nd.json /tmp/tmp.646agzkQVK/find-2nd + run_backup backup-minio 1 + local name=backup-minio + local idx=1 + desc 'run backup backup-minio-1' + set +o xtrace ----------------------------------------------------------------------------------- run backup backup-minio-1 ----------------------------------------------------------------------------------- + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1566/e2e-tests/pitr-sharded/conf/backup-minio.yml + /usr/bin/sed -e 's/name:/name: backup-minio-1/' + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.fXXUD5zDmf ++ mktemp + local LAST_ERR=/tmp/tmp.Cjcbd7YJKz + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.fXXUD5zDmf perconaservermongodbbackup.psmdb.percona.com/backup-minio-1 created + cat /tmp/tmp.Cjcbd7YJKz + rm /tmp/tmp.fXXUD5zDmf /tmp/tmp.Cjcbd7YJKz + return 0 + wait_backup backup-minio-1 + local backup_name=backup-minio-1 + set +o xtrace backup-minio-1.................... 
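
# run_backup uses the same sed templating with only the name substituted, and
# wait_backup then polls the psmdb-backup object until the dots above end; the
# exact field it checks is not visible in this trace, so .status.state is an
# assumption:
cat e2e-tests/pitr-sharded/conf/backup-minio.yml \
  | sed -e 's/name:/name: backup-minio-1/' \
  | kubectl apply -f -
kubectl get psmdb-backup backup-minio-1 -o jsonpath='{.status.state}'
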
+ sleep 5 + check_recovery backup-minio-1 latest '' -3rd some-name + local backup_name=backup-minio-1 + local restore_type=latest + local restore_date= + local cmp_postfix=-3rd + local cluster_name=some-name + desc 'write more data before restore by latest' + set +o xtrace ----------------------------------------------------------------------------------- write more data before restore by latest ----------------------------------------------------------------------------------- + sleep 60 + run_mongos 'use myApp\n db.test.insert({ x: 100501 })' myApp:myPass@some-name-mongos.pitr-sharded-26322 + local 'command=use myApp\n db.test.insert({ x: 100501 })' + local uri=myApp:myPass@some-name-mongos.pitr-sharded-26322 + local driver=mongodb + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.nVPLMZKuUf +++ mktemp ++ local LAST_ERR=/tmp/tmp.wrqdRQeS0K ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.nVPLMZKuUf ++ cat /tmp/tmp.wrqdRQeS0K ++ rm /tmp/tmp.nVPLMZKuUf /tmp/tmp.wrqdRQeS0K ++ return 0 + local client_container=psmdb-client-7469665986-8krmt + local mongo_flag= + kubectl_bin exec psmdb-client-7469665986-8krmt -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100501 })\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.pitr-sharded-26322.svc.cluster.local/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.0BIkpEI9dY ++ mktemp + local LAST_ERR=/tmp/tmp.JPfJDwbKDz + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-7469665986-8krmt -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100501 })\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.pitr-sharded-26322.svc.cluster.local/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.0BIkpEI9dY Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-mongos.pitr-sharded-26322.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb Implicit session: session { "id" : UUID("83425592-cacb-4314-beaa-4c3ed291e9af") } Percona Server for MongoDB server version: v7.0.11-6 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.JPfJDwbKDz + rm /tmp/tmp.0BIkpEI9dY /tmp/tmp.JPfJDwbKDz + return 0 + desc 'waiting for chunks to be uploaded' + set +o xtrace ----------------------------------------------------------------------------------- waiting for chunks to be uploaded ----------------------------------------------------------------------------------- + sleep 120 + desc 'check restore by latest' + set +o xtrace ----------------------------------------------------------------------------------- check restore by latest ----------------------------------------------------------------------------------- + /usr/bin/sed -e 's/name:/name: restore-backup-minio-1/' + kubectl_bin apply -f - + '[' -z '' ']' + /usr/bin/sed -e 's/backupName:/backupName: backup-minio-1/' + /usr/bin/sed -e /date:/d + /usr/bin/sed -e 's/type:/type: latest/' + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1566/e2e-tests/pitr-sharded/conf/restore.yml ++ mktemp + local LAST_OUT=/tmp/tmp.WT5k1Vm1g9 ++ mktemp + local LAST_ERR=/tmp/tmp.RQw8qRRkM0 + local exit_status=0 + 
local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.WT5k1Vm1g9 perconaservermongodbrestore.psmdb.percona.com/restore-backup-minio-1 created + cat /tmp/tmp.RQw8qRRkM0 + rm /tmp/tmp.WT5k1Vm1g9 /tmp/tmp.RQw8qRRkM0 + return 0 + wait_restore backup-minio-1 some-name ready 1 + local backup_name=backup-minio-1 + local cluster_name=some-name + local target_state=ready + local wait_cluster_consistency=1 + local wait_time=780 + set +o xtrace waiting psmdb-restore/backup-minio-1 to reach ready state........... + '[' 1 -eq 1 ']' + wait_cluster_consistency some-name + local cluster_name=some-name + local wait_time=32 + retry=0 + sleep 7 + echo -n 'waiting for cluster readyness' waiting for cluster readyness++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.nVUSdWReTS +++ mktemp ++ local LAST_ERR=/tmp/tmp.GkAHZAcqGU ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.nVUSdWReTS ++ cat /tmp/tmp.GkAHZAcqGU ++ rm /tmp/tmp.nVUSdWReTS /tmp/tmp.GkAHZAcqGU ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 1 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.x5rREzkfBW +++ mktemp ++ local LAST_ERR=/tmp/tmp.L3XSF44t1Q ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.x5rREzkfBW ++ cat /tmp/tmp.L3XSF44t1Q ++ rm /tmp/tmp.x5rREzkfBW /tmp/tmp.L3XSF44t1Q ++ return 0 + [[ error == \r\e\a\d\y ]] + let retry+=1 + '[' 2 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.vb8RFyhe9D +++ mktemp ++ local LAST_ERR=/tmp/tmp.aZHvSp0V5B ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.vb8RFyhe9D ++ cat /tmp/tmp.aZHvSp0V5B ++ rm /tmp/tmp.vb8RFyhe9D /tmp/tmp.aZHvSp0V5B ++ return 0 + [[ error == \r\e\a\d\y ]] + let retry+=1 + '[' 3 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.VIeHG2WHpD +++ mktemp ++ local LAST_ERR=/tmp/tmp.KFzfwqCKbZ ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.VIeHG2WHpD ++ cat /tmp/tmp.KFzfwqCKbZ ++ rm /tmp/tmp.VIeHG2WHpD /tmp/tmp.KFzfwqCKbZ ++ return 0 + [[ error == \r\e\a\d\y ]] + let retry+=1 + '[' 4 -ge 32 ']' + echo -n . 
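
# The restore-by-latest manifest applied above differs from the restore-by-date
# one only in the type substitution and in deleting the date line outright:
cat e2e-tests/pitr-sharded/conf/restore.yml \
  | sed -e 's/name:/name: restore-backup-minio-1/' \
  | sed -e 's/backupName:/backupName: backup-minio-1/' \
  | sed -e 's/type:/type: latest/' \
  | sed -e '/date:/d' \
  | kubectl apply -f -
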
.+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.abifera5HL +++ mktemp ++ local LAST_ERR=/tmp/tmp.hbSZhUyf8D ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.abifera5HL ++ cat /tmp/tmp.hbSZhUyf8D ++ rm /tmp/tmp.abifera5HL /tmp/tmp.hbSZhUyf8D ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 5 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.xSL2zkuArt +++ mktemp ++ local LAST_ERR=/tmp/tmp.HuB4SFoAMH ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.xSL2zkuArt ++ cat /tmp/tmp.HuB4SFoAMH ++ rm /tmp/tmp.xSL2zkuArt /tmp/tmp.HuB4SFoAMH ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 6 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Qxk6jTEViu +++ mktemp ++ local LAST_ERR=/tmp/tmp.1H7SRVVAxG ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Qxk6jTEViu ++ cat /tmp/tmp.1H7SRVVAxG ++ rm /tmp/tmp.Qxk6jTEViu /tmp/tmp.1H7SRVVAxG ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 7 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.IUWHxcFtdl +++ mktemp ++ local LAST_ERR=/tmp/tmp.YxjYgSeTbq ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.IUWHxcFtdl ++ cat /tmp/tmp.YxjYgSeTbq ++ rm /tmp/tmp.IUWHxcFtdl /tmp/tmp.YxjYgSeTbq ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 8 -ge 32 ']' + echo -n . 
.+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.r1K7QLa69M +++ mktemp ++ local LAST_ERR=/tmp/tmp.hRzG4EHwZH ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.r1K7QLa69M ++ cat /tmp/tmp.hRzG4EHwZH ++ rm /tmp/tmp.r1K7QLa69M /tmp/tmp.hRzG4EHwZH ++ return 0 + [[ ready == \r\e\a\d\y ]] + echo + set -o xtrace + wait_for_running some-name-mongos 3 + local name=some-name-mongos + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=mongos + local cluster_name=some-name ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + [[ 0 -eq 2 ]] + wait_pod some-name-mongos-0 + local pod=some-name-mongos-0 + set +o xtrace waiting for pod/some-name-mongos-0 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 1 -eq 2 ]] + wait_pod some-name-mongos-1 + local pod=some-name-mongos-1 + set +o xtrace waiting for pod/some-name-mongos-1 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.z4PpeNU9L4 +++ mktemp ++ local LAST_ERR=/tmp/tmp.jp9hDR8o3n ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.z4PpeNU9L4 ++ cat /tmp/tmp.jp9hDR8o3n ++ rm /tmp/tmp.z4PpeNU9L4 /tmp/tmp.jp9hDR8o3n ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-mongos-2 + local pod=some-name-mongos-2 + set +o xtrace waiting for pod/some-name-mongos-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].non_voting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.hkhDua3k3S +++ mktemp ++ local LAST_ERR=/tmp/tmp.YI9kCSfZ9U ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].non_voting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.hkhDua3k3S ++ cat /tmp/tmp.YI9kCSfZ9U ++ rm /tmp/tmp.hkhDua3k3S /tmp/tmp.YI9kCSfZ9U ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness + sleep 10 + compare_mongos_cmd find myApp:myPass@some-name-mongos.pitr-sharded-26322 -3rd + local command=find + local uri=myApp:myPass@some-name-mongos.pitr-sharded-26322 + local postfix=-3rd + local suffix= + local database=myApp + local collection=test + egrep -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + run_mongos 'use myApp\n db.test.find()' myApp:myPass@some-name-mongos.pitr-sharded-26322 mongodb '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-mongos.pitr-sharded-26322 + local driver=mongodb + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ local LAST_OUT=/tmp/tmp.7bfty1UDfM +++ mktemp ++ local LAST_ERR=/tmp/tmp.3Q5oj2hZe7 ++ 
local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.7bfty1UDfM ++ cat /tmp/tmp.3Q5oj2hZe7 ++ rm /tmp/tmp.7bfty1UDfM /tmp/tmp.3Q5oj2hZe7 ++ return 0 + local client_container=psmdb-client-7469665986-8krmt + local mongo_flag= + kubectl_bin exec psmdb-client-7469665986-8krmt -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.pitr-sharded-26322.svc.cluster.local/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.fM8nD0JGry ++ mktemp + local LAST_ERR=/tmp/tmp.R0XLaEfof8 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-7469665986-8krmt -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.pitr-sharded-26322.svc.cluster.local/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.fM8nD0JGry + cat /tmp/tmp.R0XLaEfof8 + rm /tmp/tmp.fM8nD0JGry /tmp/tmp.R0XLaEfof8 + return 0 + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1566/e2e-tests/pitr-sharded/compare/find-3rd.json /tmp/tmp.646agzkQVK/find-3rd + compare_latest_restorable_time some-name-rs0 backup-minio-1 + local cluster=some-name-rs0 + local backup_name=backup-minio-1 + local latest_restorable_time + local backup_time ++ get_latest_restorable_time some-name-rs0 ++ local cluster=some-name-rs0 ++ local first_timestamp ++ local second_timestamp ++ local retry=0 ++ [[ '' != '' ]] +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.vvGN0lAudd ++++ mktemp +++ local LAST_ERR=/tmp/tmp.0t7Q6Ibdyu +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.vvGN0lAudd +++ cat /tmp/tmp.0t7Q6Ibdyu +++ rm /tmp/tmp.vvGN0lAudd /tmp/tmp.0t7Q6Ibdyu +++ return 0 ++ first_timestamp=1718117078 ++ sleep 5 ++ [[ 1718117078 != '' ]] ++ [[ 1718117078 != \n\u\l\l ]] +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.bGMTayWVDN ++++ mktemp +++ local LAST_ERR=/tmp/tmp.jeOnHjD4Oh +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.bGMTayWVDN +++ cat /tmp/tmp.jeOnHjD4Oh +++ rm /tmp/tmp.bGMTayWVDN /tmp/tmp.jeOnHjD4Oh +++ return 0 ++ second_timestamp=1718117078 ++ let retry+=1 ++ [[ 1 -gt 30 ]] ++ [[ 1718117078 != '' ]] ++ [[ 1718117078 != \n\u\l\l ]] ++ [[ 1718117078 == 1718117078 ]] ++ /usr/bin/date -u -d @1718117078 +%Y-%m-%dT%H:%M:%SZ + latest_restorable_time=2024-06-11T14:44:38Z ++ get_latest_restorable_time_from_backup_object backup-minio-1 ++ local backup_name=backup-minio-1 ++ local latestRestorableTime ++ local retry=0 ++ [[ '' != '' ]] ++ sleep 5 +++ kubectl_bin get psmdb-backup backup-minio-1 -o 'jsonpath={.status.latestRestorableTime}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.7n5TNNrmJX ++++ mktemp +++ local LAST_ERR=/tmp/tmp.5Lkp5A6kzI +++ local 
exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get psmdb-backup backup-minio-1 -o 'jsonpath={.status.latestRestorableTime}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.7n5TNNrmJX +++ cat /tmp/tmp.5Lkp5A6kzI +++ rm /tmp/tmp.7n5TNNrmJX /tmp/tmp.5Lkp5A6kzI +++ return 0 ++ latestRestorableTime=2024-06-11T14:44:38Z ++ let retry+=1 ++ [[ 1 -gt 30 ]] ++ [[ 2024-06-11T14:44:38Z != '' ]] ++ [[ 2024-06-11T14:44:38Z != \n\u\l\l ]] ++ echo 2024-06-11T14:44:38Z + backup_time=2024-06-11T14:44:38Z + [[ 2024-06-11T14:44:38Z != \2\0\2\4\-\0\6\-\1\1\T\1\4\:\4\4\:\3\8\Z ]] + desc 'delete custom RuntimeClass' + set +o xtrace ----------------------------------------------------------------------------------- delete custom RuntimeClass ----------------------------------------------------------------------------------- + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1566/e2e-tests/conf/container-rc.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.zNOoh4eHz6 ++ mktemp + local LAST_ERR=/tmp/tmp.7uV5A0AcBK + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1566/e2e-tests/conf/container-rc.yaml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.zNOoh4eHz6 runtimeclass.node.k8s.io "container-rc" deleted + cat /tmp/tmp.7uV5A0AcBK + rm /tmp/tmp.zNOoh4eHz6 /tmp/tmp.7uV5A0AcBK + return 0 + destroy pitr-sharded-26322 + local namespace=pitr-sharded-26322 + local ignore_logs=true + desc 'destroy cluster/operator and all other resources' + set +o xtrace ----------------------------------------------------------------------------------- destroy cluster/operator and all other resources ----------------------------------------------------------------------------------- + '[' true == false ']' + delete_crd + desc 'get and delete old CRDs and RBAC' + set +o xtrace ----------------------------------------------------------------------------------- get and delete old CRDs and RBAC ----------------------------------------------------------------------------------- + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1566/deploy/crd.yaml --ignore-not-found --wait=false ++ mktemp + local LAST_OUT=/tmp/tmp.0jdjyq3AJu ++ mktemp + local LAST_ERR=/tmp/tmp.QNteElik8D + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1566/deploy/crd.yaml --ignore-not-found --wait=false + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.0jdjyq3AJu customresourcedefinition.apiextensions.k8s.io "perconaservermongodbbackups.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbrestores.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbs.psmdb.percona.com" deleted + cat /tmp/tmp.QNteElik8D + rm /tmp/tmp.0jdjyq3AJu /tmp/tmp.QNteElik8D + return 0 ++ yq eval .metadata.name /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1566/deploy/crd.yaml ++ grep -v '\-\-\-' + for crd_name in '$(yq eval '\''.metadata.name'\'' "${src_dir}/deploy/crd.yaml" | grep -v '\''\-\-\-'\'')' + kubectl get perconaservermongodbbackups.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbbackups.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: 
the server doesn't have a resource type "perconaservermongodbbackups" + kubectl patch perconaservermongodbbackups.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbbackups" + : + kubectl_bin wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.ECh1hzU6lc ++ mktemp + local LAST_ERR=/tmp/tmp.piTdJDVgqN + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.ECh1hzU6lc + cat /tmp/tmp.piTdJDVgqN + rm /tmp/tmp.ECh1hzU6lc /tmp/tmp.piTdJDVgqN + return 0 + for crd_name in '$(yq eval '\''.metadata.name'\'' "${src_dir}/deploy/crd.yaml" | grep -v '\''\-\-\-'\'')' + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbrestores.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' + grep -v NAMESPACE + kubectl get perconaservermongodbrestores.psmdb.percona.com --all-namespaces -o wide error: the server doesn't have a resource type "perconaservermongodbrestores" + kubectl patch perconaservermongodbrestores.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbrestores" + : + kubectl_bin wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.RHRPAP00nG ++ mktemp + local LAST_ERR=/tmp/tmp.SQAeu94V8f + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.RHRPAP00nG + cat /tmp/tmp.SQAeu94V8f + rm /tmp/tmp.RHRPAP00nG /tmp/tmp.SQAeu94V8f + return 0 + for crd_name in '$(yq eval '\''.metadata.name'\'' "${src_dir}/deploy/crd.yaml" | grep -v '\''\-\-\-'\'')' + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbs.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' + grep -v NAMESPACE + kubectl get perconaservermongodbs.psmdb.percona.com --all-namespaces -o wide error: the server doesn't have a resource type "perconaservermongodbs" + kubectl patch perconaservermongodbs.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbs" + : + kubectl_bin wait --for=delete crd perconaservermongodbs.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.Fi1GkhCTRz ++ mktemp + local LAST_ERR=/tmp/tmp.1kCHTYW59r + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=delete crd perconaservermongodbs.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Fi1GkhCTRz + cat /tmp/tmp.1kCHTYW59r + rm /tmp/tmp.Fi1GkhCTRz /tmp/tmp.1kCHTYW59r + return 0 + local rbac_yaml=rbac.yaml + '[' -n psmdb-operator ']' + rbac_yaml=cw-rbac.yaml + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1566/deploy/cw-rbac.yaml --ignore-not-found ++ mktemp + local LAST_OUT=/tmp/tmp.N1jWAGkNHl ++ mktemp + local LAST_ERR=/tmp/tmp.dDUseaD1G6 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1566/deploy/cw-rbac.yaml --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + 
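
# The finalizer-clearing step above feeds every namespaced object of the CRD into
# a merge patch that empties .metadata.finalizers, with $0 and $1 being the
# namespace and name columns of each listed row; the ":" no-op in the trace shows
# that failures (here, the CRD is already gone) are deliberately swallowed:
kubectl get perconaservermongodbs.psmdb.percona.com --all-namespaces -o wide \
  | grep -v NAMESPACE \
  | xargs -L 1 sh -xc 'kubectl patch perconaservermongodbs.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' \
  || :
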
break + cat /tmp/tmp.N1jWAGkNHl clusterrole.rbac.authorization.k8s.io "percona-server-mongodb-operator" deleted clusterrolebinding.rbac.authorization.k8s.io "service-account-percona-server-mongodb-operator" deleted + cat /tmp/tmp.dDUseaD1G6 + rm /tmp/tmp.N1jWAGkNHl /tmp/tmp.dDUseaD1G6 + return 0 + destroy_cert_manager + kubectl_bin delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.vCo80kzQwj ++ mktemp + local LAST_ERR=/tmp/tmp.jgZaTZVfw6 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.vCo80kzQwj + cat /tmp/tmp.jgZaTZVfw6 Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": namespaces "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificaterequests.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificates.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "challenges.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "clusterissuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "issuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "orders.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": serviceaccounts "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": serviceaccounts "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": serviceaccounts "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": 
+ destroy_cert_manager
+ kubectl_bin delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml
++ mktemp
+ local LAST_OUT=/tmp/tmp.vCo80kzQwj
++ mktemp
+ local LAST_ERR=/tmp/tmp.jgZaTZVfw6
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml
+ exit_status=1
+ set -e
+ '[' 1 '!=' 0 -a -n 1 ']'
+ cat /tmp/tmp.vCo80kzQwj
+ cat /tmp/tmp.jgZaTZVfw6
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": namespaces "cert-manager" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificaterequests.cert-manager.io" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificates.cert-manager.io" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "challenges.acme.cert-manager.io" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "clusterissuers.cert-manager.io" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "issuers.cert-manager.io" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "orders.acme.cert-manager.io" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": serviceaccounts "cert-manager-cainjector" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": serviceaccounts "cert-manager" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": serviceaccounts "cert-manager-webhook" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cainjector" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-orders" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cluster-view" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-view" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-edit" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-cainjector" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-orders" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager:leaderelection" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager:leaderelection" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": services "cert-manager" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": services "cert-manager-webhook" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": deployments.apps "cert-manager-cainjector" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": deployments.apps "cert-manager" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": deployments.apps "cert-manager-webhook" not found
"https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": mutatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": validatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found + sleep 0 + for i in '$(seq 0 2)' + set +e + kubectl delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.vCo80kzQwj + cat /tmp/tmp.jgZaTZVfw6 Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": namespaces "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificaterequests.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificates.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "challenges.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "clusterissuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "issuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "orders.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": serviceaccounts "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": serviceaccounts "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": serviceaccounts "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting 
"https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cluster-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-edit" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": 
clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": services "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": services "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": deployments.apps "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": deployments.apps "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": deployments.apps "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": mutatingwebhookconfigurations.admissionregistration.k8s.io 
"cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": validatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found + sleep 4 + for i in '$(seq 0 2)' + set +e + kubectl delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.vCo80kzQwj + cat /tmp/tmp.jgZaTZVfw6 Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": namespaces "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificaterequests.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificates.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "challenges.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "clusterissuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "issuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "orders.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": serviceaccounts "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": serviceaccounts "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": serviceaccounts "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificates" not 
found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cluster-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-edit" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting 
"https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": services "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": services "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": deployments.apps "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": deployments.apps "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": deployments.apps "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": mutatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found Error from server (NotFound): error when deleting 
"https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": validatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found + sleep 8 + cat /tmp/tmp.vCo80kzQwj + cat /tmp/tmp.jgZaTZVfw6 Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": namespaces "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificaterequests.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificates.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "challenges.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "clusterissuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "issuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "orders.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": serviceaccounts "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": serviceaccounts "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": serviceaccounts "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when 
deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cluster-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-edit" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": 
+ '[' -n '' ']'
+ '[' -n psmdb-operator ']'
+ kubectl_bin delete --grace-period=0 --force=true namespace pitr-sharded-26322
+ rm -rf /tmp/tmp.646agzkQVK
++ mktemp
+ kubectl_bin delete --grace-period=0 --force=true namespace psmdb-operator
++ mktemp
+ local LAST_OUT=/tmp/tmp.4zpfoDW034
+ local LAST_OUT=/tmp/tmp.b2X6U9GK39
++ mktemp
+ local LAST_ERR=/tmp/tmp.Be0qkFKiv4
+ local exit_status=0
+ local timeout=4
+ desc 'test passed'
+ set +o xtrace
-----------------------------------------------------------------------------------
test passed
-----------------------------------------------------------------------------------
++ seq 0 2
++ mktemp
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl delete --grace-period=0 --force=true namespace pitr-sharded-26322
+ local LAST_ERR=/tmp/tmp.8vZGfnIhWZ
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl delete --grace-period=0 --force=true namespace psmdb-operator
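The interleaved trace lines above (two sets of LAST_OUT/LAST_ERR assignments mixed together) come from the two namespace deletions running concurrently; --grace-period=0 --force=true skips graceful pod termination so CI teardown finishes quickly. A sketch of that final cleanup step, under the assumption that the two calls are backgrounded and awaited, which is what the interleaving suggests:

# Force-delete the test and operator namespaces in parallel.
for ns in pitr-sharded-26322 psmdb-operator; do
    kubectl delete --grace-period=0 --force=true namespace "$ns" &
done
wait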