Log: /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1569/e2e-tests/logs/demand-backup-physical-sharded.log
WARNING: version difference between client (1.30) and server (1.26) exceeds the supported minor version skew of +/-1
WARNING: version difference between client (1.30) and server (1.26) exceeds the supported minor version skew of +/-1
WARNING: version difference between client (1.30) and server (1.26) exceeds the supported minor version skew of +/-1
+ create_infra demand-backup-physical-sharded-4995
+ local ns=demand-backup-physical-sharded-4995
+ delete_crd
+ desc 'get and delete old CRDs and RBAC'
+ set +o xtrace
-----------------------------------------------------------------------------------
get and delete old CRDs and RBAC
-----------------------------------------------------------------------------------
+ kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1569/deploy/crd.yaml --ignore-not-found --wait=false
++ mktemp
+ local LAST_OUT=/tmp/tmp.wWJMJKr0wu
++ mktemp
+ local LAST_ERR=/tmp/tmp.GQVScSl9cG
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1569/deploy/crd.yaml --ignore-not-found --wait=false
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.wWJMJKr0wu
customresourcedefinition.apiextensions.k8s.io "perconaservermongodbbackups.psmdb.percona.com" deleted
customresourcedefinition.apiextensions.k8s.io "perconaservermongodbrestores.psmdb.percona.com" deleted
customresourcedefinition.apiextensions.k8s.io "perconaservermongodbs.psmdb.percona.com" deleted
+ cat /tmp/tmp.GQVScSl9cG
+ rm /tmp/tmp.wWJMJKr0wu /tmp/tmp.GQVScSl9cG
+ return 0
++ yq eval .metadata.name /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1569/deploy/crd.yaml
++ grep -v '\-\-\-'
+ for crd_name in '$(yq eval '\''.metadata.name'\'' "${src_dir}/deploy/crd.yaml" | grep -v '\''\-\-\-'\'')'
+ kubectl get perconaservermongodbbackups.psmdb.percona.com --all-namespaces -o wide
+ grep -v NAMESPACE
+ xargs -L 1 sh -xc 'kubectl patch perconaservermongodbbackups.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"'
+ kubectl patch perconaservermongodbbackups.psmdb.percona.com -n demand-backup-physical-sharded-21087 backup-aws-s3-sharded --type=merge -p '{"metadata":{"finalizers":[]}}'
perconaservermongodbbackup.psmdb.percona.com/backup-aws-s3-sharded patched
+ kubectl_bin wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com
++ mktemp
+ local LAST_OUT=/tmp/tmp.7urWqwg2AL
++ mktemp
+ local LAST_ERR=/tmp/tmp.L4hXRahgiR
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.7urWqwg2AL
+ cat /tmp/tmp.L4hXRahgiR
+ rm /tmp/tmp.7urWqwg2AL /tmp/tmp.L4hXRahgiR
+ return 0
+ for crd_name in '$(yq eval '\''.metadata.name'\'' "${src_dir}/deploy/crd.yaml" | grep -v '\''\-\-\-'\'')'
+ kubectl get perconaservermongodbrestores.psmdb.percona.com --all-namespaces -o wide
+ grep -v NAMESPACE
+ xargs -L 1 sh -xc 'kubectl patch perconaservermongodbrestores.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"'
error: the server doesn't have a resource type "perconaservermongodbrestores"
+ kubectl patch perconaservermongodbrestores.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}'
error: the server doesn't have a resource type
"perconaservermongodbrestores" + : + kubectl_bin wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.2BsmRd7Kph ++ mktemp + local LAST_ERR=/tmp/tmp.qyULYnES09 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.2BsmRd7Kph + cat /tmp/tmp.qyULYnES09 + rm /tmp/tmp.2BsmRd7Kph /tmp/tmp.qyULYnES09 + return 0 + for crd_name in '$(yq eval '\''.metadata.name'\'' "${src_dir}/deploy/crd.yaml" | grep -v '\''\-\-\-'\'')' + kubectl get perconaservermongodbs.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbs.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbs" + kubectl patch perconaservermongodbs.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbs" + : + kubectl_bin wait --for=delete crd perconaservermongodbs.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.ZPCp66ilTN ++ mktemp + local LAST_ERR=/tmp/tmp.tRAQwlDd0y + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=delete crd perconaservermongodbs.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.ZPCp66ilTN + cat /tmp/tmp.tRAQwlDd0y + rm /tmp/tmp.ZPCp66ilTN /tmp/tmp.tRAQwlDd0y + return 0 + local rbac_yaml=rbac.yaml + '[' -n psmdb-operator ']' + rbac_yaml=cw-rbac.yaml + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1569/deploy/cw-rbac.yaml --ignore-not-found ++ mktemp + local LAST_OUT=/tmp/tmp.mzQBTFHbPx ++ mktemp + local LAST_ERR=/tmp/tmp.OYEhgVMI2X + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1569/deploy/cw-rbac.yaml --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.mzQBTFHbPx clusterrole.rbac.authorization.k8s.io "percona-server-mongodb-operator" deleted clusterrolebinding.rbac.authorization.k8s.io "service-account-percona-server-mongodb-operator" deleted + cat /tmp/tmp.OYEhgVMI2X + rm /tmp/tmp.mzQBTFHbPx /tmp/tmp.OYEhgVMI2X + return 0 + check_crd_for_deletion PR-1569-eb818ce7 + local git_tag=PR-1569-eb818ce7 ++ curl -s https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/PR-1569-eb818ce7/deploy/crd.yaml ++ yq eval .metadata.name ++ /usr/bin/sed s/---//g ++ /usr/bin/sed ':a;N;$!ba;s/\n/ /g' + for crd_name in '$(curl -s https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/${git_tag}/deploy/crd.yaml | yq eval '\''.metadata.name'\'' | $sed '\''s/---//g'\'' | $sed '\'':a;N;$!ba;s/\n/ /g'\'')' ++ kubectl_bin get crd/null -o 'jsonpath={.status.conditions[-1].type}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.SmoLsscmcG +++ mktemp ++ local LAST_ERR=/tmp/tmp.PA2xzeLQ7u ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}' ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.SmoLsscmcG ++ cat /tmp/tmp.PA2xzeLQ7u Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ sleep 0 ++ for i in '$(seq 0 2)' ++ set +e 
++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}' ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.SmoLsscmcG ++ cat /tmp/tmp.PA2xzeLQ7u Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ sleep 4 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}' ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.SmoLsscmcG ++ cat /tmp/tmp.PA2xzeLQ7u Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ sleep 8 ++ cat /tmp/tmp.SmoLsscmcG ++ cat /tmp/tmp.PA2xzeLQ7u Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ rm /tmp/tmp.SmoLsscmcG /tmp/tmp.PA2xzeLQ7u ++ return 1 + [[ '' == \T\e\r\m\i\n\a\t\i\n\g ]] + '[' -n psmdb-operator ']' + create_namespace psmdb-operator + local namespace=psmdb-operator + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + destroy_chaos_mesh ++ helm list --all-namespaces --filter chaos-mesh ++ tail -n1 ++ awk '-F ' '{print $2}' ++ sed s/NAMESPACE// + local chaos_mesh_ns= + desc 'destroy chaos-mesh' + set +o xtrace ----------------------------------------------------------------------------------- destroy chaos-mesh ----------------------------------------------------------------------------------- + '[' -n '' ']' ++ kubectl get MutatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete MutatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep validate-auth ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl api-resources ++ grep chaos-mesh ++ awk '{print $1}' ++ kubectl get crd ++ grep chaos-mesh.org ++ awk '{print $1}' + timeout 30 kubectl delete crd error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrolebinding ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrolebinding error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrole ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrole error: resource(s) were provided, but no name was specified + : + desc 'cleaned up all old namespaces' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up all old namespaces ----------------------------------------------------------------------------------- + kubectl_bin get ns + egrep -v '^kube-|^default|Terminating|psmdb-operator|openshift|gke-mcs|^NAME' + '[' -n '' ']' + xargs kubectl delete ns + desc 'cleaned up old namespaces psmdb-operator' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up old namespaces psmdb-operator ----------------------------------------------------------------------------------- + kubectl_bin delete namespace psmdb-operator --ignore-not-found ++ mktemp ++ mktemp + awk '{print$1}' + local LAST_OUT=/tmp/tmp.AOgUG9LagR + local LAST_OUT=/tmp/tmp.miEuovk9n6 ++ mktemp ++ mktemp + local LAST_ERR=/tmp/tmp.nKOXFVoNBm + local exit_status=0 + local timeout=4 + local 
LAST_ERR=/tmp/tmp.SLc1Mnw4Eq + local exit_status=0 + local timeout=4 ++ seq 0 2 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete namespace psmdb-operator --ignore-not-found + for i in '$(seq 0 2)' + set +e + kubectl get ns + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.miEuovk9n6 + cat /tmp/tmp.SLc1Mnw4Eq + rm /tmp/tmp.miEuovk9n6 /tmp/tmp.SLc1Mnw4Eq + return 0 namespace "demand-backup-physical-sharded-21087" deleted + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.AOgUG9LagR namespace "psmdb-operator" deleted + cat /tmp/tmp.nKOXFVoNBm + rm /tmp/tmp.AOgUG9LagR /tmp/tmp.nKOXFVoNBm + return 0 + kubectl_bin wait --for=delete namespace psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.tU3dhQYBFV ++ mktemp + local LAST_ERR=/tmp/tmp.QIHh9Mq34O + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=delete namespace psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.tU3dhQYBFV + cat /tmp/tmp.QIHh9Mq34O + rm /tmp/tmp.tU3dhQYBFV /tmp/tmp.QIHh9Mq34O + return 0 + desc 'create namespace psmdb-operator' + set +o xtrace ----------------------------------------------------------------------------------- create namespace psmdb-operator ----------------------------------------------------------------------------------- + kubectl_bin create namespace psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.kc9pzDcNXV ++ mktemp + local LAST_ERR=/tmp/tmp.hBRE1vQQpm + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl create namespace psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.kc9pzDcNXV namespace/psmdb-operator created + cat /tmp/tmp.hBRE1vQQpm + rm /tmp/tmp.kc9pzDcNXV /tmp/tmp.hBRE1vQQpm + return 0 ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.JkUFcvDLEA +++ mktemp ++ local LAST_ERR=/tmp/tmp.w5fX7eCv0U ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.JkUFcvDLEA ++ cat /tmp/tmp.w5fX7eCv0U ++ rm /tmp/tmp.JkUFcvDLEA /tmp/tmp.w5fX7eCv0U ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-1569-eb818ce7-2-cluster1 --namespace=psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.yrUs4etN6u ++ mktemp + local LAST_ERR=/tmp/tmp.kAO0gI9UNH + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-1569-eb818ce7-2-cluster1 --namespace=psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.yrUs4etN6u Context "gke_cloud-dev-112233_us-central1-a_jen-psmdb-1569-eb818ce7-2-cluster1" modified. 
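
Every kubectl invocation in this log goes through the suite's kubectl_bin wrapper: stdout and stderr are captured into mktemp files, the call is retried up to three times, and the temp files are dumped and removed afterwards. The backoff is visible in the crd/null probe above (sleep 0, sleep 4, sleep 8 with timeout=4). A minimal sketch of such a wrapper; the function name and the timeout-times-attempt backoff are inferred from the trace, not taken from the suite's real functions library:

    # sketch of a kubectl_bin-style retry wrapper (names assumed)
    kubectl_retry() {
        local out err exit_status=0 timeout=4
        out=$(mktemp); err=$(mktemp)
        for i in $(seq 0 2); do
            set +e
            kubectl "$@" >"$out" 2>"$err"
            exit_status=$?
            set -e
            if [ "$exit_status" -ne 0 ]; then
                sleep $((timeout * i))   # matches the observed 0s/4s/8s pauses
            else
                break
            fi
        done
        cat "$out"; cat "$err" >&2
        rm "$out" "$err"
        return "$exit_status"
    }
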
+ cat /tmp/tmp.kAO0gI9UNH + rm /tmp/tmp.yrUs4etN6u /tmp/tmp.kAO0gI9UNH + return 0 + deploy_operator + desc 'start PSMDB operator' + set +o xtrace ----------------------------------------------------------------------------------- start PSMDB operator ----------------------------------------------------------------------------------- + local cr_file + '[' -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1569/e2e-tests/demand-backup-physical-sharded/conf/crd.yaml ']' + cr_file=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1569/deploy/crd.yaml + kubectl_bin apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1569/deploy/crd.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.i6rBcPXBRD ++ mktemp + local LAST_ERR=/tmp/tmp.MkUPM41pbW + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1569/deploy/crd.yaml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.i6rBcPXBRD customresourcedefinition.apiextensions.k8s.io/perconaservermongodbbackups.psmdb.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaservermongodbrestores.psmdb.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaservermongodbs.psmdb.percona.com serverside-applied + cat /tmp/tmp.MkUPM41pbW + rm /tmp/tmp.i6rBcPXBRD /tmp/tmp.MkUPM41pbW + return 0 + '[' -n psmdb-operator ']' + apply_rbac cw-rbac + local operator_namespace=psmdb-operator + local rbac=cw-rbac + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1569/deploy/cw-rbac.yaml + sed -e 's^namespace: .*^namespace: psmdb-operator^' + kubectl_bin apply -n psmdb-operator -f - ++ mktemp + local LAST_OUT=/tmp/tmp.jxmwRC4Sze ++ mktemp + local LAST_ERR=/tmp/tmp.rMisrsq6CE + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -n psmdb-operator -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.jxmwRC4Sze clusterrole.rbac.authorization.k8s.io/percona-server-mongodb-operator created serviceaccount/percona-server-mongodb-operator created clusterrolebinding.rbac.authorization.k8s.io/service-account-percona-server-mongodb-operator created + cat /tmp/tmp.rMisrsq6CE + rm /tmp/tmp.jxmwRC4Sze /tmp/tmp.rMisrsq6CE + return 0 + yq eval ' (.spec.template.spec.containers[].image = "perconalab/percona-server-mongodb-operator:PR-1569-eb818ce7") | ((.. | select(.[] == "DISABLE_TELEMETRY")) |= .value="true") | + kubectl_bin apply -f - ((.. 
| select(.[] == "LOG_LEVEL")) |= .value="DEBUG")' /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1569/deploy/cw-operator.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.YqnFAhIOoM ++ mktemp + local LAST_ERR=/tmp/tmp.i7779dzu2F + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.YqnFAhIOoM deployment.apps/percona-server-mongodb-operator created + cat /tmp/tmp.i7779dzu2F + rm /tmp/tmp.YqnFAhIOoM /tmp/tmp.i7779dzu2F + return 0 + sleep 2 ++ get_operator_pod ++ kubectl_bin get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.ylDX11JDFY +++ mktemp ++ local LAST_ERR=/tmp/tmp.fWdpRVwyFY ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.ylDX11JDFY ++ cat /tmp/tmp.fWdpRVwyFY ++ rm /tmp/tmp.ylDX11JDFY /tmp/tmp.fWdpRVwyFY ++ return 0 + wait_pod percona-server-mongodb-operator-8d99b8f59-6s46l + local pod=percona-server-mongodb-operator-8d99b8f59-6s46l + set +o xtrace waiting for pod/percona-server-mongodb-operator-8d99b8f59-6s46l to be ready.OK + create_namespace demand-backup-physical-sharded-4995 + local namespace=demand-backup-physical-sharded-4995 + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + destroy_chaos_mesh ++ helm list --all-namespaces --filter chaos-mesh ++ tail -n1 ++ awk '-F ' '{print $2}' ++ sed s/NAMESPACE// + local chaos_mesh_ns= + desc 'destroy chaos-mesh' + set +o xtrace ----------------------------------------------------------------------------------- destroy chaos-mesh ----------------------------------------------------------------------------------- + '[' -n '' ']' ++ kubectl get MutatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete MutatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep validate-auth ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl api-resources ++ grep chaos-mesh ++ awk '{print $1}' ++ kubectl get crd ++ grep chaos-mesh.org ++ awk '{print $1}' + timeout 30 kubectl delete crd error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrolebinding ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrolebinding error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrole ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrole error: resource(s) were provided, but no name was specified + : + desc 'cleaned up all old namespaces' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up all old namespaces ----------------------------------------------------------------------------------- + kubectl_bin get ns + awk '{print$1}' + '[' -n '' ']' ++ mktemp + desc 'cleaned up old namespaces 
demand-backup-physical-sharded-4995' + xargs kubectl delete ns + set +o xtrace ----------------------------------------------------------------------------------- cleaned up old namespaces demand-backup-physical-sharded-4995 ----------------------------------------------------------------------------------- + kubectl_bin delete namespace demand-backup-physical-sharded-4995 --ignore-not-found ++ mktemp + local LAST_OUT=/tmp/tmp.9f5ZVlLIXS + egrep -v '^kube-|^default|Terminating|psmdb-operator|openshift|gke-mcs|^NAME' ++ mktemp + local LAST_OUT=/tmp/tmp.xd4xTCTmU7 + local LAST_ERR=/tmp/tmp.QvJ61k8kjn + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get ns ++ mktemp + local LAST_ERR=/tmp/tmp.nZbCG0UF2x + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete namespace demand-backup-physical-sharded-4995 --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.9f5ZVlLIXS + cat /tmp/tmp.QvJ61k8kjn + rm /tmp/tmp.9f5ZVlLIXS /tmp/tmp.QvJ61k8kjn + return 0 error: resource(s) were provided, but no name was specified + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.xd4xTCTmU7 + cat /tmp/tmp.nZbCG0UF2x + rm /tmp/tmp.xd4xTCTmU7 /tmp/tmp.nZbCG0UF2x + return 0 + kubectl_bin wait --for=delete namespace demand-backup-physical-sharded-4995 ++ mktemp + local LAST_OUT=/tmp/tmp.wykF2dhr84 ++ mktemp + local LAST_ERR=/tmp/tmp.EnEdbfhs3E + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=delete namespace demand-backup-physical-sharded-4995 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.wykF2dhr84 + cat /tmp/tmp.EnEdbfhs3E + rm /tmp/tmp.wykF2dhr84 /tmp/tmp.EnEdbfhs3E + return 0 + desc 'create namespace demand-backup-physical-sharded-4995' + set +o xtrace ----------------------------------------------------------------------------------- create namespace demand-backup-physical-sharded-4995 ----------------------------------------------------------------------------------- + kubectl_bin create namespace demand-backup-physical-sharded-4995 ++ mktemp + local LAST_OUT=/tmp/tmp.SvZP5UzOMA ++ mktemp + local LAST_ERR=/tmp/tmp.U47iAz8lFo + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl create namespace demand-backup-physical-sharded-4995 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.SvZP5UzOMA namespace/demand-backup-physical-sharded-4995 created + cat /tmp/tmp.U47iAz8lFo + rm /tmp/tmp.SvZP5UzOMA /tmp/tmp.U47iAz8lFo + return 0 ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.WrZT2BdZVB +++ mktemp ++ local LAST_ERR=/tmp/tmp.rxcBrPEw5J ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.WrZT2BdZVB ++ cat /tmp/tmp.rxcBrPEw5J ++ rm /tmp/tmp.WrZT2BdZVB /tmp/tmp.rxcBrPEw5J ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-1569-eb818ce7-2-cluster1 --namespace=demand-backup-physical-sharded-4995 ++ mktemp + local LAST_OUT=/tmp/tmp.iiro3t5OG3 ++ mktemp + local LAST_ERR=/tmp/tmp.y6PbbuY2EP + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-1569-eb818ce7-2-cluster1 --namespace=demand-backup-physical-sharded-4995 
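
create_namespace first sweeps away leftover test namespaces (everything not matching the protected egrep list), then deletes, waits out, and recreates the target namespace before pointing the current kubeconfig context at it. The 'error: resource(s) were provided, but no name was specified' above is emitted, and tolerated, when the filter leaves xargs with an empty list. A condensed sketch of the same flow, with the helper name assumed; GNU xargs -r would silence that benign error:

    # sketch of a create_namespace-style reset (helper name assumed)
    recreate_namespace() {
        local ns=$1
        # drop leftover namespaces from earlier runs, keeping system/operator ones
        kubectl get ns \
            | egrep -v '^kube-|^default|Terminating|psmdb-operator|openshift|gke-mcs|^NAME' \
            | awk '{print $1}' \
            | xargs -r kubectl delete ns
        kubectl delete namespace "$ns" --ignore-not-found
        kubectl wait --for=delete namespace "$ns"
        kubectl create namespace "$ns"
        kubectl config set-context "$(kubectl config current-context)" --namespace="$ns"
    }
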
+ exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.iiro3t5OG3 Context "gke_cloud-dev-112233_us-central1-a_jen-psmdb-1569-eb818ce7-2-cluster1" modified. + cat /tmp/tmp.y6PbbuY2EP + rm /tmp/tmp.iiro3t5OG3 /tmp/tmp.y6PbbuY2EP + return 0 + deploy_minio + desc 'install Minio' + set +o xtrace ----------------------------------------------------------------------------------- install Minio ----------------------------------------------------------------------------------- + helm uninstall minio-service Error: uninstall: Release not loaded: minio-service: release: not found + : + helm repo remove minio "minio" has been removed from your repositories + helm repo add minio https://charts.min.io/ "minio" has been added to your repositories + retry 10 60 helm install minio-service --version 5.0.14 --set replicas=1 --set mode=standalone --set resources.requests.memory=256Mi --set rootUser=rootuser --set rootPassword=rootpass123 --set 'users[0].accessKey=some-access-key' --set 'users[0].secretKey=some-secret-key' --set 'users[0].policy=consoleAdmin' --set service.type=ClusterIP --set configPathmc=/tmp/.minio/ --set persistence.size=2G --set securityContext.enabled=false minio/minio + local max=10 + local delay=60 + shift 2 + local n=1 + helm install minio-service --version 5.0.14 --set replicas=1 --set mode=standalone --set resources.requests.memory=256Mi --set rootUser=rootuser --set rootPassword=rootpass123 --set 'users[0].accessKey=some-access-key' --set 'users[0].secretKey=some-secret-key' --set 'users[0].policy=consoleAdmin' --set service.type=ClusterIP --set configPathmc=/tmp/.minio/ --set persistence.size=2G --set securityContext.enabled=false minio/minio NAME: minio-service LAST DEPLOYED: Mon Jun 10 14:17:13 2024 NAMESPACE: demand-backup-physical-sharded-4995 STATUS: deployed REVISION: 1 TEST SUITE: None NOTES: MinIO can be accessed via port 9000 on the following DNS name from within your cluster: minio-service.demand-backup-physical-sharded-4995.svc.cluster.local To access MinIO from localhost, run the below commands: 1. export POD_NAME=$(kubectl get pods --namespace demand-backup-physical-sharded-4995 -l "release=minio-service" -o jsonpath="{.items[0].metadata.name}") 2. kubectl port-forward $POD_NAME 9000 --namespace demand-backup-physical-sharded-4995 Read more about port forwarding here: http://kubernetes.io/docs/user-guide/kubectl/kubectl_port-forward/ You can now access MinIO server on http://localhost:9000. Follow the below steps to connect to MinIO server with mc client: 1. Download the MinIO mc client - https://min.io/docs/minio/linux/reference/minio-mc.html#quickstart 2. export MC_HOST_minio-service-local=http://$(kubectl get secret --namespace demand-backup-physical-sharded-4995 minio-service -o jsonpath="{.data.rootUser}" | base64 --decode):$(kubectl get secret --namespace demand-backup-physical-sharded-4995 minio-service -o jsonpath="{.data.rootPassword}" | base64 --decode)@localhost:9000 3. 
mc ls minio-service-local ++ kubectl_bin get pods --selector=release=minio-service -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.1ScABPsHpM +++ mktemp ++ local LAST_ERR=/tmp/tmp.GTt1dqfQY6 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=release=minio-service -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.1ScABPsHpM ++ cat /tmp/tmp.GTt1dqfQY6 ++ rm /tmp/tmp.1ScABPsHpM /tmp/tmp.GTt1dqfQY6 ++ return 0 + MINIO_POD=minio-service-57dd49b-29vdw + wait_pod minio-service-57dd49b-29vdw + local pod=minio-service-57dd49b-29vdw + set +o xtrace waiting for pod/minio-service-57dd49b-29vdw to be ready.OK + '[' -n psmdb-operator ']' + kubectl_bin create svc -n psmdb-operator externalname minio-service --external-name=minio-service.demand-backup-physical-sharded-4995.svc.cluster.local --tcp=9000 ++ mktemp + local LAST_OUT=/tmp/tmp.yNnyr32NfL ++ mktemp + local LAST_ERR=/tmp/tmp.74olqpnw5e + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl create svc -n psmdb-operator externalname minio-service --external-name=minio-service.demand-backup-physical-sharded-4995.svc.cluster.local --tcp=9000 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.yNnyr32NfL service/minio-service created + cat /tmp/tmp.74olqpnw5e + rm /tmp/tmp.yNnyr32NfL /tmp/tmp.74olqpnw5e + return 0 + kubectl_bin run -i --rm aws-cli --image=perconalab/awscli --restart=Never -- bash -c 'AWS_ACCESS_KEY_ID=some-access-key AWS_SECRET_ACCESS_KEY=some-secret-key AWS_DEFAULT_REGION=us-east-1 \ /usr/bin/aws --endpoint-url http://minio-service:9000 s3 mb s3://operator-testing' ++ mktemp + local LAST_OUT=/tmp/tmp.x5FAFO4skG ++ mktemp + local LAST_ERR=/tmp/tmp.mWAviGc2Ao + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl run -i --rm aws-cli --image=perconalab/awscli --restart=Never -- bash -c 'AWS_ACCESS_KEY_ID=some-access-key AWS_SECRET_ACCESS_KEY=some-secret-key AWS_DEFAULT_REGION=us-east-1 \ /usr/bin/aws --endpoint-url http://minio-service:9000 s3 mb s3://operator-testing' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.x5FAFO4skG make_bucket: operator-testing pod "aws-cli" deleted + cat /tmp/tmp.mWAviGc2Ao + rm /tmp/tmp.x5FAFO4skG /tmp/tmp.mWAviGc2Ao + return 0 + apply_s3_storage_secrets + desc 'create secrets for cloud storages' + set +o xtrace ----------------------------------------------------------------------------------- create secrets for cloud storages ----------------------------------------------------------------------------------- + '[' -z '' ']' + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1569/e2e-tests/conf/minio-secret.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1569/e2e-tests/conf/cloud-secret.yml ++ mktemp + local LAST_OUT=/tmp/tmp.IgfVxG5b71 ++ mktemp + local LAST_ERR=/tmp/tmp.ZBMd1CisSl + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1569/e2e-tests/conf/minio-secret.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1569/e2e-tests/conf/cloud-secret.yml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.IgfVxG5b71 secret/minio-secret created secret/aws-s3-secret created secret/gcp-cs-secret created secret/azure-secret created + cat /tmp/tmp.ZBMd1CisSl + rm 
/tmp/tmp.IgfVxG5b71 /tmp/tmp.ZBMd1CisSl + return 0 + desc 'Testing on sharded cluster' + set +o xtrace ----------------------------------------------------------------------------------- Testing on sharded cluster ----------------------------------------------------------------------------------- + echo 'Creating PSMDB cluster' Creating PSMDB cluster + cluster=some-name + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1569/e2e-tests/conf/secrets.yml ++ mktemp + local LAST_OUT=/tmp/tmp.M6J8pONoEo ++ mktemp + local LAST_ERR=/tmp/tmp.OtLqa4ujkR + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1569/e2e-tests/conf/secrets.yml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.M6J8pONoEo secret/some-users created + cat /tmp/tmp.OtLqa4ujkR + rm /tmp/tmp.M6J8pONoEo /tmp/tmp.OtLqa4ujkR + return 0 + apply_cluster /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1569/e2e-tests/demand-backup-physical-sharded/conf/some-name-sharded.yml + '[' -z '' ']' + cat_config /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1569/e2e-tests/demand-backup-physical-sharded/conf/some-name-sharded.yml + kubectl_bin apply -f - + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1569/e2e-tests/demand-backup-physical-sharded/conf/some-name-sharded.yml + yq eval '(.spec | select(.image == null)).image = "perconalab/percona-server-mongodb-operator:main-mongod7.0"' + yq eval '(.spec | select(has("initImage"))).initImage = "perconalab/percona-server-mongodb-operator:PR-1569-eb818ce7"' + yq eval '(.spec | select(has("pmm"))).pmm.image = "perconalab/pmm-client:dev-latest"' + yq eval '(.spec | select(has("backup"))).backup.image = "perconalab/percona-server-mongodb-operator:main-backup"' + yq eval '.spec.upgradeOptions.apply="Never"' ++ mktemp + local LAST_OUT=/tmp/tmp.YIUTysyPFY ++ mktemp + local LAST_ERR=/tmp/tmp.c688el0YC6 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.YIUTysyPFY perconaservermongodb.psmdb.percona.com/some-name created + cat /tmp/tmp.c688el0YC6 + rm /tmp/tmp.YIUTysyPFY /tmp/tmp.c688el0YC6 + return 0 + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1569/e2e-tests/conf/client_with_tls.yml ++ mktemp + local LAST_OUT=/tmp/tmp.PYr3Lxdl8B ++ mktemp + local LAST_ERR=/tmp/tmp.rTPSRydpUQ + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1569/e2e-tests/conf/client_with_tls.yml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.PYr3Lxdl8B deployment.apps/psmdb-client created + cat /tmp/tmp.rTPSRydpUQ + rm /tmp/tmp.PYr3Lxdl8B /tmp/tmp.rTPSRydpUQ + return 0 + echo 'check if all pods started' check if all pods started + wait_for_running some-name-rs0 3 + local name=some-name-rs0 + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=some-name ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + [[ 0 -eq 2 ]] + wait_pod some-name-rs0-0 + local pod=some-name-rs0-0 + set +o xtrace waiting for pod/some-name-rs0-0 to be ready.........OK + for i in '$(seq 0 $last_pod)' + [[ 1 -eq 2 ]] + wait_pod some-name-rs0-1 + local pod=some-name-rs0-1 + set +o xtrace waiting for pod/some-name-rs0-1 to be ready......OK + for i in '$(seq 0 $last_pod)' + [[ 2 
-eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.PBoV8YNQGP +++ mktemp ++ local LAST_ERR=/tmp/tmp.Mw6GiCks6e ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.PBoV8YNQGP ++ cat /tmp/tmp.Mw6GiCks6e ++ rm /tmp/tmp.PBoV8YNQGP /tmp/tmp.Mw6GiCks6e ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-rs0-2 + local pod=some-name-rs0-2 + set +o xtrace waiting for pod/some-name-rs0-2 to be ready.......OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Nndjs9chCx +++ mktemp ++ local LAST_ERR=/tmp/tmp.wMueMT4o9V ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Nndjs9chCx ++ cat /tmp/tmp.wMueMT4o9V ++ rm /tmp/tmp.Nndjs9chCx /tmp/tmp.wMueMT4o9V ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness..................................... + wait_for_running some-name-cfg 3 + local name=some-name-cfg + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=cfg + local cluster_name=some-name ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + [[ 0 -eq 2 ]] + wait_pod some-name-cfg-0 + local pod=some-name-cfg-0 + set +o xtrace waiting for pod/some-name-cfg-0 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 1 -eq 2 ]] + wait_pod some-name-cfg-1 + local pod=some-name-cfg-1 + set +o xtrace waiting for pod/some-name-cfg-1 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.v8pn45Os9y +++ mktemp ++ local LAST_ERR=/tmp/tmp.D7BzGFbhmh ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.v8pn45Os9y ++ cat /tmp/tmp.D7BzGFbhmh ++ rm /tmp/tmp.v8pn45Os9y /tmp/tmp.D7BzGFbhmh ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-cfg-2 + local pod=some-name-cfg-2 + set +o xtrace waiting for pod/some-name-cfg-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].non_voting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.rHE1yjnDlz +++ mktemp ++ local LAST_ERR=/tmp/tmp.mgnORoEBpy ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].non_voting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.rHE1yjnDlz ++ cat /tmp/tmp.mgnORoEBpy ++ rm /tmp/tmp.rHE1yjnDlz /tmp/tmp.mgnORoEBpy ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness + wait_for_running some-name-mongos 3 + local name=some-name-mongos + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local 
rs_name=mongos + local cluster_name=some-name ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + [[ 0 -eq 2 ]] + wait_pod some-name-mongos-0 + local pod=some-name-mongos-0 + set +o xtrace waiting for pod/some-name-mongos-0 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 1 -eq 2 ]] + wait_pod some-name-mongos-1 + local pod=some-name-mongos-1 + set +o xtrace waiting for pod/some-name-mongos-1 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.vPFfGsrew5 +++ mktemp ++ local LAST_ERR=/tmp/tmp.4j3KDXSWrZ ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.vPFfGsrew5 ++ cat /tmp/tmp.4j3KDXSWrZ ++ rm /tmp/tmp.vPFfGsrew5 /tmp/tmp.4j3KDXSWrZ ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-mongos-2 + local pod=some-name-mongos-2 + set +o xtrace waiting for pod/some-name-mongos-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].non_voting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.9aDNwpqeI6 +++ mktemp ++ local LAST_ERR=/tmp/tmp.aSgcSHLN1H ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].non_voting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.9aDNwpqeI6 ++ cat /tmp/tmp.aSgcSHLN1H ++ rm /tmp/tmp.9aDNwpqeI6 /tmp/tmp.aSgcSHLN1H ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness + wait_cluster_consistency some-name + local cluster_name=some-name + local wait_time=32 + retry=0 + sleep 7 + echo -n 'waiting for cluster readyness' waiting for cluster readyness++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.h6r5ucKMdN +++ mktemp ++ local LAST_ERR=/tmp/tmp.aMSykhqooO ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.h6r5ucKMdN ++ cat /tmp/tmp.aMSykhqooO ++ rm /tmp/tmp.h6r5ucKMdN /tmp/tmp.aMSykhqooO ++ return 0 + [[ ready == \r\e\a\d\y ]] ++ kubectl_bin get svc some-name-mongos '-o=jsonpath={.status}' ++ jq -r 'select(.loadBalancer != null and .loadBalancer.ingress != null and .loadBalancer.ingress != []) | .loadBalancer.ingress[0][]' +++ mktemp ++ local LAST_OUT=/tmp/tmp.0huWge50SO +++ mktemp ++ local LAST_ERR=/tmp/tmp.LisR5Xy9u9 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get svc some-name-mongos '-o=jsonpath={.status}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.0huWge50SO ++ cat /tmp/tmp.LisR5Xy9u9 ++ rm /tmp/tmp.0huWge50SO /tmp/tmp.LisR5Xy9u9 ++ return 0 + lbEndpoint=34.45.38.207 + '[' -z 34.45.38.207 ']' + run_mongos 'db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})' userAdmin:userAdmin123456@some-name-mongos.demand-backup-physical-sharded-4995 + local 'command=db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})' + local 
uri=userAdmin:userAdmin123456@some-name-mongos.demand-backup-physical-sharded-4995 + local driver=mongodb + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.h9WUX3SRvK +++ mktemp ++ local LAST_ERR=/tmp/tmp.8LpQuDAFlz ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.h9WUX3SRvK ++ cat /tmp/tmp.8LpQuDAFlz ++ rm /tmp/tmp.h9WUX3SRvK /tmp/tmp.8LpQuDAFlz ++ return 0 + local client_container=psmdb-client-5f578b7f94-m7mqs + local mongo_flag= + kubectl_bin exec psmdb-client-5f578b7f94-m7mqs -- bash -c 'printf '\''db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})\n'\'' | mongo mongodb://userAdmin:userAdmin123456@some-name-mongos.demand-backup-physical-sharded-4995.svc.cluster.local/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.vmDwcgOdhy ++ mktemp + local LAST_ERR=/tmp/tmp.0nO08gjgbv + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-5f578b7f94-m7mqs -- bash -c 'printf '\''db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})\n'\'' | mongo mongodb://userAdmin:userAdmin123456@some-name-mongos.demand-backup-physical-sharded-4995.svc.cluster.local/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.vmDwcgOdhy Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-mongos.demand-backup-physical-sharded-4995.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb Implicit session: session { "id" : UUID("1274428b-0d55-418b-9292-a048b298db69") } Percona Server for MongoDB server version: v7.0.11-6 WARNING: shell and server versions do not match Successfully added user: { "user" : "myApp", "roles" : [ { "db" : "myApp", "role" : "readWrite" } ] } bye + cat /tmp/tmp.0nO08gjgbv + rm /tmp/tmp.vmDwcgOdhy /tmp/tmp.0nO08gjgbv + return 0 + sleep 1 + run_mongos 'use myApp\n db.test.insert({ x: 100501 })' myApp:myPass@some-name-mongos.demand-backup-physical-sharded-4995 + local 'command=use myApp\n db.test.insert({ x: 100501 })' + local uri=myApp:myPass@some-name-mongos.demand-backup-physical-sharded-4995 + local driver=mongodb + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.qhEoGB2flD +++ mktemp ++ local LAST_ERR=/tmp/tmp.Nnf3f4j0iv ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.qhEoGB2flD ++ cat /tmp/tmp.Nnf3f4j0iv ++ rm /tmp/tmp.qhEoGB2flD /tmp/tmp.Nnf3f4j0iv ++ return 0 + local client_container=psmdb-client-5f578b7f94-m7mqs + local mongo_flag= + kubectl_bin exec psmdb-client-5f578b7f94-m7mqs -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100501 })\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.demand-backup-physical-sharded-4995.svc.cluster.local/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.jI0Fcu1JbS ++ mktemp + local LAST_ERR=/tmp/tmp.vW85UUfKIU + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec 
psmdb-client-5f578b7f94-m7mqs -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100501 })\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.demand-backup-physical-sharded-4995.svc.cluster.local/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.jI0Fcu1JbS Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-mongos.demand-backup-physical-sharded-4995.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb Implicit session: session { "id" : UUID("edeb58cd-0c25-42ac-ac1f-13e44cd781e1") } Percona Server for MongoDB server version: v7.0.11-6 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.vW85UUfKIU + rm /tmp/tmp.jI0Fcu1JbS /tmp/tmp.vW85UUfKIU + return 0 + sleep 5 + compare_mongos_cmd find myApp:myPass@some-name-mongos.demand-backup-physical-sharded-4995 -sharded + local command=find + local uri=myApp:myPass@some-name-mongos.demand-backup-physical-sharded-4995 + local postfix=-sharded + local suffix= + local database=myApp + local collection=test + egrep -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + run_mongos 'use myApp\n db.test.find()' myApp:myPass@some-name-mongos.demand-backup-physical-sharded-4995 mongodb '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-mongos.demand-backup-physical-sharded-4995 + local driver=mongodb + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.aNBrxd9nCH +++ mktemp ++ local LAST_ERR=/tmp/tmp.I2KXP3ZwFR ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.aNBrxd9nCH ++ cat /tmp/tmp.I2KXP3ZwFR ++ rm /tmp/tmp.aNBrxd9nCH /tmp/tmp.I2KXP3ZwFR ++ return 0 + local client_container=psmdb-client-5f578b7f94-m7mqs + local mongo_flag= + kubectl_bin exec psmdb-client-5f578b7f94-m7mqs -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.demand-backup-physical-sharded-4995.svc.cluster.local/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.VHxkvfRS8X ++ mktemp + local LAST_ERR=/tmp/tmp.86yQQzwvKq + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-5f578b7f94-m7mqs -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.demand-backup-physical-sharded-4995.svc.cluster.local/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.VHxkvfRS8X + cat /tmp/tmp.86yQQzwvKq + rm /tmp/tmp.VHxkvfRS8X /tmp/tmp.86yQQzwvKq + return 0 + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1569/e2e-tests/demand-backup-physical-sharded/compare/find-sharded.json /tmp/tmp.mfU7C0j586/find-sharded + echo 'waiting 60 seconds for stable timestamp in wiredtiger' waiting 60 seconds for stable timestamp in wiredtiger + sleep 60 + echo 'running backups' running backups + backup_name_minio=backup-minio-sharded + run_backup minio backup-minio-sharded + local storage=minio + local 
backup_name=backup-minio-sharded + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1569/e2e-tests/demand-backup-physical-sharded/conf/backup.yml + kubectl_bin apply -f - + /usr/bin/sed -e 's/name:/name: backup-minio-sharded/' + /usr/bin/sed -e 's/storageName:/storageName: minio/' ++ mktemp + local LAST_OUT=/tmp/tmp.X8b0rY7UR1 ++ mktemp + local LAST_ERR=/tmp/tmp.svV1cOkcrN + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.X8b0rY7UR1 perconaservermongodbbackup.psmdb.percona.com/backup-minio-sharded created + cat /tmp/tmp.svV1cOkcrN + rm /tmp/tmp.X8b0rY7UR1 /tmp/tmp.svV1cOkcrN + return 0 + '[' -z '' ']' + backup_name_aws=backup-aws-s3-sharded + backup_name_gcp=backup-gcp-cs-sharded + backup_name_azure=backup-azure-blob-sharded + run_backup aws-s3 backup-aws-s3-sharded + local storage=aws-s3 + local backup_name=backup-aws-s3-sharded + /usr/bin/sed -e 's/name:/name: backup-aws-s3-sharded/' + /usr/bin/sed -e 's/storageName:/storageName: aws-s3/' + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1569/e2e-tests/demand-backup-physical-sharded/conf/backup.yml + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.j0rcauSLLK ++ mktemp + local LAST_ERR=/tmp/tmp.0FbNxZcMWD + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.j0rcauSLLK perconaservermongodbbackup.psmdb.percona.com/backup-aws-s3-sharded created + cat /tmp/tmp.0FbNxZcMWD + rm /tmp/tmp.j0rcauSLLK /tmp/tmp.0FbNxZcMWD + return 0 + run_backup gcp-cs backup-gcp-cs-sharded + local storage=gcp-cs + local backup_name=backup-gcp-cs-sharded + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1569/e2e-tests/demand-backup-physical-sharded/conf/backup.yml + /usr/bin/sed -e 's/name:/name: backup-gcp-cs-sharded/' + kubectl_bin apply -f - ++ mktemp + /usr/bin/sed -e 's/storageName:/storageName: gcp-cs/' + local LAST_OUT=/tmp/tmp.IqdafoWF9V ++ mktemp + local LAST_ERR=/tmp/tmp.liS7z2WaK3 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.IqdafoWF9V perconaservermongodbbackup.psmdb.percona.com/backup-gcp-cs-sharded created + cat /tmp/tmp.liS7z2WaK3 + rm /tmp/tmp.IqdafoWF9V /tmp/tmp.liS7z2WaK3 + return 0 + run_backup azure-blob backup-azure-blob-sharded + local storage=azure-blob + local backup_name=backup-azure-blob-sharded + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1569/e2e-tests/demand-backup-physical-sharded/conf/backup.yml + /usr/bin/sed -e 's/storageName:/storageName: azure-blob/' + kubectl_bin apply -f - + /usr/bin/sed -e 's/name:/name: backup-azure-blob-sharded/' ++ mktemp + local LAST_OUT=/tmp/tmp.uzxMfZ2z5u ++ mktemp + local LAST_ERR=/tmp/tmp.RhBUAFKVtx + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.uzxMfZ2z5u perconaservermongodbbackup.psmdb.percona.com/backup-azure-blob-sharded created + cat /tmp/tmp.RhBUAFKVtx + rm /tmp/tmp.uzxMfZ2z5u /tmp/tmp.RhBUAFKVtx + return 0 + wait_backup backup-aws-s3-sharded + local backup_name=backup-aws-s3-sharded + set +o xtrace backup-aws-s3-sharded................................................ 
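
Each run_backup call above stamps the backup name and storageName into the shared backup.yml template with two sed substitutions before applying it, and wait_backup then polls the backup object once per second (each printed dot) until it reports ready. A rough sketch of both helpers; the psmdb-backup short name and the .status.state field are assumed from this operator's CRDs, and the real wait_backup also bails out on an error state:

    # sketch: templated backup CR plus a polling wait (field layout assumed)
    run_backup() {
        local storage=$1 backup_name=$2
        sed -e "s/name:/name: ${backup_name}/" \
            -e "s/storageName:/storageName: ${storage}/" conf/backup.yml \
            | kubectl apply -f -
    }

    wait_backup() {
        until [ "$(kubectl get psmdb-backup "$1" -o 'jsonpath={.status.state}')" = "ready" ]; do
            printf .
            sleep 1
        done
    }
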
+ wait_backup backup-gcp-cs-sharded + local backup_name=backup-gcp-cs-sharded + set +o xtrace backup-gcp-cs-sharded......................... + wait_backup backup-azure-blob-sharded + local backup_name=backup-azure-blob-sharded + set +o xtrace backup-azure-blob-sharded....................... + wait_backup backup-minio-sharded + local backup_name=backup-minio-sharded + set +o xtrace backup-minio-sharded. + '[' -z '' ']' + echo 'drop collection' drop collection + run_mongos 'use myApp\n db.test.drop()' myApp:myPass@some-name-mongos.demand-backup-physical-sharded-4995 + local 'command=use myApp\n db.test.drop()' + local uri=myApp:myPass@some-name-mongos.demand-backup-physical-sharded-4995 + local driver=mongodb + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.tuK7JwpeVC +++ mktemp ++ local LAST_ERR=/tmp/tmp.L6G8kkiTyI ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.tuK7JwpeVC ++ cat /tmp/tmp.L6G8kkiTyI ++ rm /tmp/tmp.tuK7JwpeVC /tmp/tmp.L6G8kkiTyI ++ return 0 + local client_container=psmdb-client-5f578b7f94-m7mqs + local mongo_flag= + kubectl_bin exec psmdb-client-5f578b7f94-m7mqs -- bash -c 'printf '\''use myApp\n db.test.drop()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.demand-backup-physical-sharded-4995.svc.cluster.local/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.55KHIYCC1f ++ mktemp + local LAST_ERR=/tmp/tmp.Q32SebKTbt + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-5f578b7f94-m7mqs -- bash -c 'printf '\''use myApp\n db.test.drop()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.demand-backup-physical-sharded-4995.svc.cluster.local/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.55KHIYCC1f Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-mongos.demand-backup-physical-sharded-4995.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb Implicit session: session { "id" : UUID("88459502-d704-43bf-a934-326ad1f840a2") } Percona Server for MongoDB server version: v7.0.11-6 WARNING: shell and server versions do not match switched to db myApp true bye + cat /tmp/tmp.Q32SebKTbt + rm /tmp/tmp.55KHIYCC1f /tmp/tmp.Q32SebKTbt + return 0 + echo 'check backup and restore -- aws-s3' check backup and restore -- aws-s3 + run_restore backup-aws-s3-sharded _restore_sharded + local backup_name=backup-aws-s3-sharded + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1569/e2e-tests/demand-backup-physical-sharded/conf/restore.yml + /usr/bin/sed -e 's/name:/name: restore-backup-aws-s3-sharded/' + /usr/bin/sed -e 's/backupName:/backupName: backup-aws-s3-sharded/' + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.tFGzz3Y8yH ++ mktemp + local LAST_ERR=/tmp/tmp.1bFu5W5dHW + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.tFGzz3Y8yH perconaservermongodbrestore.psmdb.percona.com/restore-backup-aws-s3-sharded created + cat /tmp/tmp.1bFu5W5dHW + rm /tmp/tmp.tFGzz3Y8yH /tmp/tmp.1bFu5W5dHW + return 0 + run_recovery_check backup-aws-s3-sharded _restore_sharded + local 
backup_name=backup-aws-s3-sharded + local compare_suffix=_restore_sharded + wait_restore backup-aws-s3-sharded some-name requested 0 1200 + local backup_name=backup-aws-s3-sharded + local cluster_name=some-name + local target_state=requested + local wait_cluster_consistency=0 + local wait_time=1200 + set +o xtrace waiting psmdb-restore/backup-aws-s3-sharded to reach requested state........................................................................................................................................................ + '[' 0 -eq 1 ']' + echo + compare_kubectl statefulset/some-name-rs0 _restore_sharded + local resource=statefulset/some-name-rs0 + local postfix=_restore_sharded + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1569/e2e-tests/demand-backup-physical-sharded/compare/statefulset_some-name-rs0_restore_sharded.yml + local new_result=/tmp/tmp.mfU7C0j586/statefulset_some-name-rs0.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1569/e2e-tests/demand-backup-physical-sharded/compare/statefulset_some-name-rs0_restore_sharded-oc.yml ']' + kubectl_bin get -o yaml statefulset/some-name-rs0 ++ mktemp + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("demand-backup-physical-sharded-4995", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. 
== "batch/v1beta1")) = "batch/v1" ' - + local LAST_OUT=/tmp/tmp.CyyHbLgghB ++ mktemp + local LAST_ERR=/tmp/tmp.xAFds77gY8 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get -o yaml statefulset/some-name-rs0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.CyyHbLgghB + cat /tmp/tmp.xAFds77gY8 + rm /tmp/tmp.CyyHbLgghB /tmp/tmp.xAFds77gY8 + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.mfU7C0j586/statefulset_some-name-rs0.yml + version_gt 1.22 ++ echo '1.26 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.mfU7C0j586/statefulset_some-name-rs0.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.mfU7C0j586/statefulset_some-name-rs0.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1569/e2e-tests/demand-backup-physical-sharded/compare/statefulset_some-name-rs0_restore_sharded.yml == */cronjob* ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1569/e2e-tests/demand-backup-physical-sharded/compare/statefulset_some-name-rs0_restore_sharded.yml /tmp/tmp.mfU7C0j586/statefulset_some-name-rs0.yml + wait_restore backup-aws-s3-sharded some-name ready 0 1800 + local backup_name=backup-aws-s3-sharded + local cluster_name=some-name + local target_state=ready + local wait_cluster_consistency=0 + local wait_time=1800 + set +o xtrace waiting psmdb-restore/backup-aws-s3-sharded to reach ready state....................................................................................................... + '[' 0 -eq 1 ']' + kubectl_bin get psmdb some-name -o yaml ++ mktemp + local LAST_OUT=/tmp/tmp.JWhXQb5eEA ++ mktemp + local LAST_ERR=/tmp/tmp.DTtrCPMC3k + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get psmdb some-name -o yaml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.JWhXQb5eEA apiVersion: psmdb.percona.com/v1 kind: PerconaServerMongoDB metadata: annotations: kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"psmdb.percona.com/v1","kind":"PerconaServerMongoDB","metadata":{"annotations":{},"name":"some-name","namespace":"demand-backup-physical-sharded-4995"},"spec":{"backup":{"enabled":true,"image":"perconalab/percona-server-mongodb-operator:main-backup","storages":{"aws-s3":{"s3":{"bucket":"operator-testing","credentialsSecret":"aws-s3-secret","insecureSkipTLSVerify":false,"prefix":"psmdb-demand-backup-physical-sharded","region":"us-east-1"},"type":"s3"},"azure-blob":{"azure":{"container":"operator-testing","credentialsSecret":"azure-secret","prefix":"psmdb-demand-backup-physical-sharded"},"type":"azure"},"gcp-cs":{"s3":{"bucket":"operator-testing","credentialsSecret":"gcp-cs-secret","endpointUrl":"https://storage.googleapis.com","insecureSkipTLSVerify":false,"prefix":"psmdb-demand-backup-physical-sharded","region":"us-east-1"},"type":"s3"},"minio":{"s3":{"bucket":"operator-testing","credentialsSecret":"minio-secret","endpointUrl":"http://minio-service:9000/","insecureSkipTLSVerify":false,"region":"us-east-1"},"type":"s3"}},"tasks":[{"compressionType":"gzip","enabled":true,"name":"weekly","schedule":"0 0 * * 0","storageName":"aws-s3"}]},"image":"perconalab/percona-server-mongodb-operator:main-mongod7.0","imagePullPolicy":"Always","replsets":[{"affinity":{"antiAffinityTopologyKey":"none"},"configuration":"operationProfiling:\n mode: slowOp\n slowOpThresholdMs: 100\nsecurity:\n enableEncryption: true\n redactClientLogData: 
false\nsetParameter:\n ttlMonitorSleepSecs: 60\n wiredTigerConcurrentReadTransactions: 128\n wiredTigerConcurrentWriteTransactions: 128\nstorage:\n engine: wiredTiger\n wiredTiger:\n collectionConfig:\n blockCompressor: snappy\n engineConfig:\n directoryForIndexes: false\n journalCompressor: snappy\n indexConfig:\n prefixCompression: true\n","expose":{"enabled":false,"exposeType":"ClusterIP"},"name":"rs0","resources":{"limits":{"cpu":"500m","memory":"1G"},"requests":{"cpu":"100m","memory":"0.1G"}},"size":3,"volumeSpec":{"persistentVolumeClaim":{"resources":{"requests":{"storage":"3Gi"}}}}}],"secrets":{"users":"some-users"},"sharding":{"configsvrReplSet":{"affinity":{"antiAffinityTopologyKey":"none"},"configuration":"operationProfiling:\n mode: slowOp\n slowOpThresholdMs: 100\nsecurity:\n enableEncryption: true\n redactClientLogData: false\nsetParameter:\n ttlMonitorSleepSecs: 60\n wiredTigerConcurrentReadTransactions: 128\n wiredTigerConcurrentWriteTransactions: 128\nstorage:\n engine: wiredTiger\n wiredTiger:\n collectionConfig:\n blockCompressor: snappy\n engineConfig:\n directoryForIndexes: false\n journalCompressor: snappy\n indexConfig:\n prefixCompression: true\n","expose":{"enabled":false,"exposeType":"ClusterIP"},"resources":{"limits":{"cpu":"500m","memory":"1G"},"requests":{"cpu":"100m","memory":"0.1G"}},"size":3,"volumeSpec":{"persistentVolumeClaim":{"resources":{"requests":{"storage":"3Gi"}}}}},"enabled":true,"mongos":{"affinity":{"antiAffinityTopologyKey":"none"},"expose":{"exposeType":"LoadBalancer"},"resources":{"limits":{"cpu":"500m","memory":"1G"},"requests":{"cpu":"100m","memory":"0.1G"}},"size":3}},"upgradeOptions":{"apply":"Never"}}} percona.com/resync-pbm: "true" creationTimestamp: "2024-06-10T14:17:56Z" generation: 2 name: some-name namespace: demand-backup-physical-sharded-4995 resourceVersion: "18744" uid: 34d37458-02e9-4004-b8b2-97fe3b42d62a spec: backup: enabled: true image: perconalab/percona-server-mongodb-operator:main-backup storages: aws-s3: s3: bucket: operator-testing credentialsSecret: aws-s3-secret insecureSkipTLSVerify: false prefix: psmdb-demand-backup-physical-sharded region: us-east-1 type: s3 azure-blob: azure: container: operator-testing credentialsSecret: azure-secret prefix: psmdb-demand-backup-physical-sharded type: azure gcp-cs: s3: bucket: operator-testing credentialsSecret: gcp-cs-secret endpointUrl: https://storage.googleapis.com insecureSkipTLSVerify: false prefix: psmdb-demand-backup-physical-sharded region: us-east-1 type: s3 minio: s3: bucket: operator-testing credentialsSecret: minio-secret endpointUrl: http://minio-service:9000/ insecureSkipTLSVerify: false region: us-east-1 type: s3 tasks: - compressionType: gzip enabled: true name: weekly schedule: 0 0 * * 0 storageName: aws-s3 crVersion: 1.17.0 image: perconalab/percona-server-mongodb-operator:main-mongod7.0 imagePullPolicy: Always replsets: - affinity: antiAffinityTopologyKey: none configuration: | operationProfiling: mode: slowOp slowOpThresholdMs: 100 security: enableEncryption: true redactClientLogData: false setParameter: ttlMonitorSleepSecs: 60 wiredTigerConcurrentReadTransactions: 128 wiredTigerConcurrentWriteTransactions: 128 storage: engine: wiredTiger wiredTiger: collectionConfig: blockCompressor: snappy engineConfig: directoryForIndexes: false journalCompressor: snappy indexConfig: prefixCompression: true expose: enabled: false exposeType: ClusterIP name: rs0 resources: limits: cpu: 500m memory: 1G requests: cpu: 100m memory: 0.1G size: 3 volumeSpec: persistentVolumeClaim: 
resources: requests: storage: 3Gi secrets: users: some-users sharding: configsvrReplSet: affinity: antiAffinityTopologyKey: none configuration: | operationProfiling: mode: slowOp slowOpThresholdMs: 100 security: enableEncryption: true redactClientLogData: false setParameter: ttlMonitorSleepSecs: 60 wiredTigerConcurrentReadTransactions: 128 wiredTigerConcurrentWriteTransactions: 128 storage: engine: wiredTiger wiredTiger: collectionConfig: blockCompressor: snappy engineConfig: directoryForIndexes: false journalCompressor: snappy indexConfig: prefixCompression: true expose: enabled: false exposeType: ClusterIP resources: limits: cpu: 500m memory: 1G requests: cpu: 100m memory: 0.1G size: 3 volumeSpec: persistentVolumeClaim: resources: requests: storage: 3Gi enabled: true mongos: affinity: antiAffinityTopologyKey: none expose: exposeType: LoadBalancer resources: limits: cpu: 500m memory: 1G requests: cpu: 100m memory: 0.1G size: 3 upgradeOptions: apply: Never status: conditions: - lastTransitionTime: "2024-06-10T14:17:59Z" status: "True" type: initializing - lastTransitionTime: "2024-06-10T14:19:35Z" message: 'rs0: ready' reason: RSReady status: "True" type: ready - lastTransitionTime: "2024-06-10T14:19:35Z" status: "True" type: initializing - lastTransitionTime: "2024-06-10T14:20:16Z" reason: MongosReady status: "True" type: ready - lastTransitionTime: "2024-06-10T14:20:16Z" status: "True" type: initializing - lastTransitionTime: "2024-06-10T14:20:26Z" status: "True" type: ready - lastTransitionTime: "2024-06-10T14:25:26Z" status: "True" type: initializing - lastTransitionTime: "2024-06-10T14:26:05Z" message: 'rs0: ready' reason: RSReady status: "True" type: ready - lastTransitionTime: "2024-06-10T14:26:05Z" status: "True" type: initializing - lastTransitionTime: "2024-06-10T14:26:32Z" message: 'rs0: ready' reason: RSReady status: "True" type: ready - lastTransitionTime: "2024-06-10T14:26:32Z" status: "True" type: initializing - lastTransitionTime: "2024-06-10T14:26:45Z" message: 'cfg: ready' reason: RSReady status: "True" type: ready - lastTransitionTime: "2024-06-10T14:26:44Z" status: "True" type: initializing - lastTransitionTime: "2024-06-10T14:27:05Z" message: 'rs0: ready' reason: RSReady status: "True" type: ready - lastTransitionTime: "2024-06-10T14:27:05Z" status: "True" type: initializing - lastTransitionTime: "2024-06-10T14:27:18Z" message: 'cfg: ready' reason: RSReady status: "True" type: ready - lastTransitionTime: "2024-06-10T14:27:18Z" status: "True" type: initializing host: 34.45.38.207 mongoImage: perconalab/percona-server-mongodb-operator:main-mongod7.0 mongoVersion: 7.0.11-6 mongos: ready: 0 size: 0 status: initializing observedGeneration: 2 ready: 6 replsets: cfg: initialized: true ready: 3 size: 3 status: ready rs0: added_as_shard: true initialized: true ready: 3 size: 3 status: ready size: 6 state: initializing + cat /tmp/tmp.DTtrCPMC3k + rm /tmp/tmp.JWhXQb5eEA /tmp/tmp.DTtrCPMC3k + return 0 ++ kubectl_bin get psmdb some-name -o yaml ++ yq '.metadata.annotations."percona.com/resync-pbm"' +++ mktemp ++ local LAST_OUT=/tmp/tmp.nAx8eupjcG +++ mktemp ++ local LAST_ERR=/tmp/tmp.GmeNGaMcj7 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o yaml ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.nAx8eupjcG ++ cat /tmp/tmp.GmeNGaMcj7 ++ rm /tmp/tmp.nAx8eupjcG /tmp/tmp.GmeNGaMcj7 ++ return 0 + '[' true == null ']' + echo + wait_cluster_consistency some-name + local 
cluster_name=some-name
+ local wait_time=32
+ retry=0
+ sleep 7
+ echo -n 'waiting for cluster readyness'
waiting for cluster readyness
[17 near-identical poll iterations (retry 1 through 17) elided: each ran kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' through fresh mktemp capture files, read back "initializing", checked the retry count against the 32-attempt limit, printed "." and slept 10 seconds]
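[annotation: the elided iterations are the harness's wait_cluster_consistency loop: poll the custom resource's .status.state until it reports "ready", giving up after wait_time=32 attempts spaced 10 seconds apart. A minimal sketch of the loop as reconstructed from the trace; each poll actually goes through the kubectl_bin wrapper, and the failure branch is never exercised in this run, so its exact behavior is an assumption:

    retry=0
    until [[ "$(kubectl get psmdb some-name -o 'jsonpath={.status.state}')" == "ready" ]]; do
        let retry+=1
        if [ "$retry" -ge 32 ]; then
            echo "cluster did not reach ready state in time"    # assumed failure handling
            exit 1
        fi
        echo -n .
        sleep 10
    done
]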
.+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.13eDwKrDLE +++ mktemp ++ local LAST_ERR=/tmp/tmp.WrLfQk8uqp ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.13eDwKrDLE ++ cat /tmp/tmp.WrLfQk8uqp ++ rm /tmp/tmp.13eDwKrDLE /tmp/tmp.WrLfQk8uqp ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 18 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.xkTGZVy1C9 +++ mktemp ++ local LAST_ERR=/tmp/tmp.TcIE1Yzdmc ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.xkTGZVy1C9 ++ cat /tmp/tmp.TcIE1Yzdmc ++ rm /tmp/tmp.xkTGZVy1C9 /tmp/tmp.TcIE1Yzdmc ++ return 0 + [[ ready == \r\e\a\d\y ]] + compare_mongos_cmd find myApp:myPass@some-name-mongos.demand-backup-physical-sharded-4995 -sharded + local command=find + local uri=myApp:myPass@some-name-mongos.demand-backup-physical-sharded-4995 + local postfix=-sharded + local suffix= + local database=myApp + local collection=test + run_mongos 'use myApp\n db.test.find()' myApp:myPass@some-name-mongos.demand-backup-physical-sharded-4995 mongodb '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-mongos.demand-backup-physical-sharded-4995 + local driver=mongodb + local suffix=.svc.cluster.local + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' + egrep -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.eeKrE3atIQ +++ mktemp ++ local LAST_ERR=/tmp/tmp.kGYv4UYchf ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.eeKrE3atIQ ++ cat /tmp/tmp.kGYv4UYchf ++ rm /tmp/tmp.eeKrE3atIQ /tmp/tmp.kGYv4UYchf ++ return 0 + local client_container=psmdb-client-5f578b7f94-m7mqs + local mongo_flag= + kubectl_bin exec psmdb-client-5f578b7f94-m7mqs -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.demand-backup-physical-sharded-4995.svc.cluster.local/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.q1uiv8lDDS ++ mktemp + local LAST_ERR=/tmp/tmp.dyEkp519Ua + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-5f578b7f94-m7mqs -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.demand-backup-physical-sharded-4995.svc.cluster.local/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.q1uiv8lDDS + cat /tmp/tmp.dyEkp519Ua + rm /tmp/tmp.q1uiv8lDDS /tmp/tmp.dyEkp519Ua + return 0 + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1569/e2e-tests/demand-backup-physical-sharded/compare/find-sharded.json /tmp/tmp.mfU7C0j586/find-sharded + echo + set -o xtrace + 
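[annotation: compare_mongos_cmd, which just ran above, makes the query output diffable across runs: egrep -v strips connection banners and version-mismatch warnings, and sed blanks ObjectId values and rewrites the namespace-numbered service suffix to a fixed "-xxx.svc". The same idea drives the earlier statefulset comparison, where a long yq del(...) chain removes cluster-specific fields before diff -u. The pipeline below reassembles the commands as they appear in the trace; the redirection into /tmp/tmp.mfU7C0j586/find-sharded is inferred from the diff target that follows it:

    run_mongos 'use myApp\n db.test.find()' "$uri" mongodb '' \
        | egrep -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' \
        | /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' \
        > /tmp/tmp.mfU7C0j586/find-sharded
    diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1569/e2e-tests/demand-backup-physical-sharded/compare/find-sharded.json /tmp/tmp.mfU7C0j586/find-sharded
]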
check_exported_mongos_service_endpoint 34.45.38.207 + local host=34.45.38.207 ++ kubectl_bin get psmdb some-name '-o=jsonpath={.status.host}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.nbTkjjniFI +++ mktemp ++ local LAST_ERR=/tmp/tmp.AOU5cQYyCy ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name '-o=jsonpath={.status.host}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.nbTkjjniFI ++ cat /tmp/tmp.AOU5cQYyCy ++ rm /tmp/tmp.nbTkjjniFI /tmp/tmp.AOU5cQYyCy ++ return 0 + '[' 34.45.38.207 '!=' 34.45.38.207 ']' + echo 'drop collection' drop collection + run_mongos 'use myApp\n db.test.drop()' myApp:myPass@some-name-mongos.demand-backup-physical-sharded-4995 + local 'command=use myApp\n db.test.drop()' + local uri=myApp:myPass@some-name-mongos.demand-backup-physical-sharded-4995 + local driver=mongodb + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.yhGIOpoFu7 +++ mktemp ++ local LAST_ERR=/tmp/tmp.iCqrogt4rG ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.yhGIOpoFu7 ++ cat /tmp/tmp.iCqrogt4rG ++ rm /tmp/tmp.yhGIOpoFu7 /tmp/tmp.iCqrogt4rG ++ return 0 + local client_container=psmdb-client-5f578b7f94-m7mqs + local mongo_flag= + kubectl_bin exec psmdb-client-5f578b7f94-m7mqs -- bash -c 'printf '\''use myApp\n db.test.drop()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.demand-backup-physical-sharded-4995.svc.cluster.local/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.OPHsSteaic ++ mktemp + local LAST_ERR=/tmp/tmp.sU5tQgNq14 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-5f578b7f94-m7mqs -- bash -c 'printf '\''use myApp\n db.test.drop()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.demand-backup-physical-sharded-4995.svc.cluster.local/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.OPHsSteaic Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-mongos.demand-backup-physical-sharded-4995.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb Implicit session: session { "id" : UUID("f5e215a4-32ab-48c3-9377-f0cce86b7812") } Percona Server for MongoDB server version: v7.0.11-6 WARNING: shell and server versions do not match switched to db myApp true bye + cat /tmp/tmp.sU5tQgNq14 + rm /tmp/tmp.OPHsSteaic /tmp/tmp.sU5tQgNq14 + return 0 + echo 'check backup and restore -- gcp-cs' check backup and restore -- gcp-cs + run_restore backup-gcp-cs-sharded _restore_sharded + local backup_name=backup-gcp-cs-sharded + /usr/bin/sed -e 's/name:/name: restore-backup-gcp-cs-sharded/' + /usr/bin/sed -e 's/backupName:/backupName: backup-gcp-cs-sharded/' + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1569/e2e-tests/demand-backup-physical-sharded/conf/restore.yml + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.IrRBdsa6JL ++ mktemp + local LAST_ERR=/tmp/tmp.4xMNwOBgKG + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.IrRBdsa6JL perconaservermongodbrestore.psmdb.percona.com/restore-backup-gcp-cs-sharded 
created + cat /tmp/tmp.4xMNwOBgKG + rm /tmp/tmp.IrRBdsa6JL /tmp/tmp.4xMNwOBgKG + return 0 + run_recovery_check backup-gcp-cs-sharded _restore_sharded + local backup_name=backup-gcp-cs-sharded + local compare_suffix=_restore_sharded + wait_restore backup-gcp-cs-sharded some-name requested 0 1200 + local backup_name=backup-gcp-cs-sharded + local cluster_name=some-name + local target_state=requested + local wait_cluster_consistency=0 + local wait_time=1200 + set +o xtrace waiting psmdb-restore/backup-gcp-cs-sharded to reach requested state......................................................................................................................................................................................................... + '[' 0 -eq 1 ']' + echo + compare_kubectl statefulset/some-name-rs0 _restore_sharded + local resource=statefulset/some-name-rs0 + local postfix=_restore_sharded + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1569/e2e-tests/demand-backup-physical-sharded/compare/statefulset_some-name-rs0_restore_sharded.yml + local new_result=/tmp/tmp.mfU7C0j586/statefulset_some-name-rs0.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1569/e2e-tests/demand-backup-physical-sharded/compare/statefulset_some-name-rs0_restore_sharded-oc.yml ']' + kubectl_bin get -o yaml statefulset/some-name-rs0 + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("demand-backup-physical-sharded-4995", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. 
== "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.NIWtdbxQIL ++ mktemp + local LAST_ERR=/tmp/tmp.F1CWEfl4e6 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get -o yaml statefulset/some-name-rs0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.NIWtdbxQIL + cat /tmp/tmp.F1CWEfl4e6 + rm /tmp/tmp.NIWtdbxQIL /tmp/tmp.F1CWEfl4e6 + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.mfU7C0j586/statefulset_some-name-rs0.yml + version_gt 1.22 ++ echo '1.26 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.mfU7C0j586/statefulset_some-name-rs0.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.mfU7C0j586/statefulset_some-name-rs0.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1569/e2e-tests/demand-backup-physical-sharded/compare/statefulset_some-name-rs0_restore_sharded.yml == */cronjob* ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1569/e2e-tests/demand-backup-physical-sharded/compare/statefulset_some-name-rs0_restore_sharded.yml /tmp/tmp.mfU7C0j586/statefulset_some-name-rs0.yml + wait_restore backup-gcp-cs-sharded some-name ready 0 1800 + local backup_name=backup-gcp-cs-sharded + local cluster_name=some-name + local target_state=ready + local wait_cluster_consistency=0 + local wait_time=1800 + set +o xtrace waiting psmdb-restore/backup-gcp-cs-sharded to reach ready state............................................................................................................. + '[' 0 -eq 1 ']' + kubectl_bin get psmdb some-name -o yaml ++ mktemp + local LAST_OUT=/tmp/tmp.JAOIerlUot ++ mktemp + local LAST_ERR=/tmp/tmp.2sDjqS4sSY + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get psmdb some-name -o yaml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.JAOIerlUot apiVersion: psmdb.percona.com/v1 kind: PerconaServerMongoDB metadata: annotations: kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"psmdb.percona.com/v1","kind":"PerconaServerMongoDB","metadata":{"annotations":{},"name":"some-name","namespace":"demand-backup-physical-sharded-4995"},"spec":{"backup":{"enabled":true,"image":"perconalab/percona-server-mongodb-operator:main-backup","storages":{"aws-s3":{"s3":{"bucket":"operator-testing","credentialsSecret":"aws-s3-secret","insecureSkipTLSVerify":false,"prefix":"psmdb-demand-backup-physical-sharded","region":"us-east-1"},"type":"s3"},"azure-blob":{"azure":{"container":"operator-testing","credentialsSecret":"azure-secret","prefix":"psmdb-demand-backup-physical-sharded"},"type":"azure"},"gcp-cs":{"s3":{"bucket":"operator-testing","credentialsSecret":"gcp-cs-secret","endpointUrl":"https://storage.googleapis.com","insecureSkipTLSVerify":false,"prefix":"psmdb-demand-backup-physical-sharded","region":"us-east-1"},"type":"s3"},"minio":{"s3":{"bucket":"operator-testing","credentialsSecret":"minio-secret","endpointUrl":"http://minio-service:9000/","insecureSkipTLSVerify":false,"region":"us-east-1"},"type":"s3"}},"tasks":[{"compressionType":"gzip","enabled":true,"name":"weekly","schedule":"0 0 * * 0","storageName":"aws-s3"}]},"image":"perconalab/percona-server-mongodb-operator:main-mongod7.0","imagePullPolicy":"Always","replsets":[{"affinity":{"antiAffinityTopologyKey":"none"},"configuration":"operationProfiling:\n mode: slowOp\n slowOpThresholdMs: 100\nsecurity:\n enableEncryption: true\n 
redactClientLogData: false\nsetParameter:\n ttlMonitorSleepSecs: 60\n wiredTigerConcurrentReadTransactions: 128\n wiredTigerConcurrentWriteTransactions: 128\nstorage:\n engine: wiredTiger\n wiredTiger:\n collectionConfig:\n blockCompressor: snappy\n engineConfig:\n directoryForIndexes: false\n journalCompressor: snappy\n indexConfig:\n prefixCompression: true\n","expose":{"enabled":false,"exposeType":"ClusterIP"},"name":"rs0","resources":{"limits":{"cpu":"500m","memory":"1G"},"requests":{"cpu":"100m","memory":"0.1G"}},"size":3,"volumeSpec":{"persistentVolumeClaim":{"resources":{"requests":{"storage":"3Gi"}}}}}],"secrets":{"users":"some-users"},"sharding":{"configsvrReplSet":{"affinity":{"antiAffinityTopologyKey":"none"},"configuration":"operationProfiling:\n mode: slowOp\n slowOpThresholdMs: 100\nsecurity:\n enableEncryption: true\n redactClientLogData: false\nsetParameter:\n ttlMonitorSleepSecs: 60\n wiredTigerConcurrentReadTransactions: 128\n wiredTigerConcurrentWriteTransactions: 128\nstorage:\n engine: wiredTiger\n wiredTiger:\n collectionConfig:\n blockCompressor: snappy\n engineConfig:\n directoryForIndexes: false\n journalCompressor: snappy\n indexConfig:\n prefixCompression: true\n","expose":{"enabled":false,"exposeType":"ClusterIP"},"resources":{"limits":{"cpu":"500m","memory":"1G"},"requests":{"cpu":"100m","memory":"0.1G"}},"size":3,"volumeSpec":{"persistentVolumeClaim":{"resources":{"requests":{"storage":"3Gi"}}}}},"enabled":true,"mongos":{"affinity":{"antiAffinityTopologyKey":"none"},"expose":{"exposeType":"LoadBalancer"},"resources":{"limits":{"cpu":"500m","memory":"1G"},"requests":{"cpu":"100m","memory":"0.1G"}},"size":3}},"upgradeOptions":{"apply":"Never"}}} percona.com/resync-pbm: "true" creationTimestamp: "2024-06-10T14:17:56Z" generation: 2 name: some-name namespace: demand-backup-physical-sharded-4995 resourceVersion: "24532" uid: 34d37458-02e9-4004-b8b2-97fe3b42d62a spec: backup: enabled: true image: perconalab/percona-server-mongodb-operator:main-backup storages: aws-s3: s3: bucket: operator-testing credentialsSecret: aws-s3-secret insecureSkipTLSVerify: false prefix: psmdb-demand-backup-physical-sharded region: us-east-1 type: s3 azure-blob: azure: container: operator-testing credentialsSecret: azure-secret prefix: psmdb-demand-backup-physical-sharded type: azure gcp-cs: s3: bucket: operator-testing credentialsSecret: gcp-cs-secret endpointUrl: https://storage.googleapis.com insecureSkipTLSVerify: false prefix: psmdb-demand-backup-physical-sharded region: us-east-1 type: s3 minio: s3: bucket: operator-testing credentialsSecret: minio-secret endpointUrl: http://minio-service:9000/ insecureSkipTLSVerify: false region: us-east-1 type: s3 tasks: - compressionType: gzip enabled: true name: weekly schedule: 0 0 * * 0 storageName: aws-s3 crVersion: 1.17.0 image: perconalab/percona-server-mongodb-operator:main-mongod7.0 imagePullPolicy: Always replsets: - affinity: antiAffinityTopologyKey: none configuration: | operationProfiling: mode: slowOp slowOpThresholdMs: 100 security: enableEncryption: true redactClientLogData: false setParameter: ttlMonitorSleepSecs: 60 wiredTigerConcurrentReadTransactions: 128 wiredTigerConcurrentWriteTransactions: 128 storage: engine: wiredTiger wiredTiger: collectionConfig: blockCompressor: snappy engineConfig: directoryForIndexes: false journalCompressor: snappy indexConfig: prefixCompression: true expose: enabled: false exposeType: ClusterIP name: rs0 resources: limits: cpu: 500m memory: 1G requests: cpu: 100m memory: 0.1G size: 3 volumeSpec: 
persistentVolumeClaim: resources: requests: storage: 3Gi secrets: users: some-users sharding: configsvrReplSet: affinity: antiAffinityTopologyKey: none configuration: | operationProfiling: mode: slowOp slowOpThresholdMs: 100 security: enableEncryption: true redactClientLogData: false setParameter: ttlMonitorSleepSecs: 60 wiredTigerConcurrentReadTransactions: 128 wiredTigerConcurrentWriteTransactions: 128 storage: engine: wiredTiger wiredTiger: collectionConfig: blockCompressor: snappy engineConfig: directoryForIndexes: false journalCompressor: snappy indexConfig: prefixCompression: true expose: enabled: false exposeType: ClusterIP resources: limits: cpu: 500m memory: 1G requests: cpu: 100m memory: 0.1G size: 3 volumeSpec: persistentVolumeClaim: resources: requests: storage: 3Gi enabled: true mongos: affinity: antiAffinityTopologyKey: none expose: exposeType: LoadBalancer resources: limits: cpu: 500m memory: 1G requests: cpu: 100m memory: 0.1G size: 3 upgradeOptions: apply: Never status: conditions: - lastTransitionTime: "2024-06-10T14:27:18Z" message: 'cfg: ready' reason: RSReady status: "True" type: ready - lastTransitionTime: "2024-06-10T14:27:18Z" status: "True" type: initializing - lastTransitionTime: "2024-06-10T14:35:09Z" message: 'cfg: ready' reason: RSReady status: "True" type: ready - lastTransitionTime: "2024-06-10T14:35:09Z" status: "True" type: initializing - lastTransitionTime: "2024-06-10T14:35:45Z" message: 'rs0: ready' reason: RSReady status: "True" type: ready - lastTransitionTime: "2024-06-10T14:35:45Z" status: "True" type: initializing - lastTransitionTime: "2024-06-10T14:35:49Z" reason: MongosReady status: "True" type: ready - lastTransitionTime: "2024-06-10T14:36:11Z" status: "True" type: initializing - lastTransitionTime: "2024-06-10T14:36:44Z" message: 'rs0: ready' reason: RSReady status: "True" type: ready - lastTransitionTime: "2024-06-10T14:36:44Z" status: "True" type: initializing - lastTransitionTime: "2024-06-10T14:36:50Z" message: 'cfg: ready' reason: RSReady status: "True" type: ready - lastTransitionTime: "2024-06-10T14:36:50Z" status: "True" type: initializing - lastTransitionTime: "2024-06-10T14:37:18Z" message: 'rs0: ready' reason: RSReady status: "True" type: ready - lastTransitionTime: "2024-06-10T14:37:17Z" status: "True" type: initializing - lastTransitionTime: "2024-06-10T14:37:31Z" message: 'cfg: ready' reason: RSReady status: "True" type: ready - lastTransitionTime: "2024-06-10T14:37:31Z" status: "True" type: initializing - lastTransitionTime: "2024-06-10T14:37:50Z" message: 'rs0: ready' reason: RSReady status: "True" type: ready - lastTransitionTime: "2024-06-10T14:37:50Z" status: "True" type: initializing - lastTransitionTime: "2024-06-10T14:38:03Z" message: 'cfg: ready' reason: RSReady status: "True" type: ready - lastTransitionTime: "2024-06-10T14:38:03Z" status: "True" type: initializing host: 34.45.38.207 mongoImage: perconalab/percona-server-mongodb-operator:main-mongod7.0 mongoVersion: 7.0.11-6 mongos: ready: 0 size: 0 status: initializing observedGeneration: 2 ready: 6 replsets: cfg: initialized: true ready: 3 size: 3 status: ready rs0: added_as_shard: true initialized: true ready: 3 size: 3 status: ready size: 6 state: initializing + cat /tmp/tmp.2sDjqS4sSY + rm /tmp/tmp.JAOIerlUot /tmp/tmp.2sDjqS4sSY + return 0 ++ kubectl_bin get psmdb some-name -o yaml ++ yq '.metadata.annotations."percona.com/resync-pbm"' +++ mktemp ++ local LAST_OUT=/tmp/tmp.3eaiFegTK6 +++ mktemp ++ local LAST_ERR=/tmp/tmp.dCatVMZEcP ++ local exit_status=0 ++ local 
timeout=4
+++ seq 0 2
++ for i in '$(seq 0 2)'
++ set +e
++ kubectl get psmdb some-name -o yaml
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 -a -n 1 ']'
++ break
++ cat /tmp/tmp.3eaiFegTK6
++ cat /tmp/tmp.dCatVMZEcP
++ rm /tmp/tmp.3eaiFegTK6 /tmp/tmp.dCatVMZEcP
++ return 0
+ '[' true == null ']'
+ echo
+ wait_cluster_consistency some-name
+ local cluster_name=some-name
+ local wait_time=32
+ retry=0
+ sleep 7
+ echo -n 'waiting for cluster readyness'
waiting for cluster readyness
[as in the aws-s3 pass above, 17 near-identical poll iterations (retry 1 through 17) elided; every poll of .status.state returned "initializing" before the dot-and-10-second-sleep step]
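[annotation: nearly all of the per-command noise in this trace comes from the kubectl_bin wrapper: it captures stdout and stderr into mktemp files, attempts the command up to three times (seq 0 2), breaks out on success, replays the captured streams, and removes the temp files. A minimal sketch reconstructed from the trace; the exact redirections and the retry delay are assumptions, since only the bookkeeping around them is visible in the log:

    kubectl_bin() {
        local LAST_OUT LAST_ERR exit_status=0 timeout=4
        LAST_OUT=$(mktemp)
        LAST_ERR=$(mktemp)
        for i in $(seq 0 2); do
            set +e
            kubectl "$@" > "$LAST_OUT" 2> "$LAST_ERR"    # redirection assumed
            exit_status=$?
            set -e
            if [ "$exit_status" != 0 -a -n "$timeout" ]; then
                cat "$LAST_OUT" "$LAST_ERR"
                sleep $((timeout * i))    # retry delay assumed
                continue
            fi
            break
        done
        cat "$LAST_OUT"
        cat "$LAST_ERR"
        rm -f "$LAST_OUT" "$LAST_ERR"
        return "$exit_status"
    }
]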
.+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.rUCx7QULXj +++ mktemp ++ local LAST_ERR=/tmp/tmp.vLd2CO5E5C ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.rUCx7QULXj ++ cat /tmp/tmp.vLd2CO5E5C ++ rm /tmp/tmp.rUCx7QULXj /tmp/tmp.vLd2CO5E5C ++ return 0 + [[ ready == \r\e\a\d\y ]] + compare_mongos_cmd find myApp:myPass@some-name-mongos.demand-backup-physical-sharded-4995 -sharded + local command=find + local uri=myApp:myPass@some-name-mongos.demand-backup-physical-sharded-4995 + local postfix=-sharded + local suffix= + local database=myApp + local collection=test + egrep -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' + run_mongos 'use myApp\n db.test.find()' myApp:myPass@some-name-mongos.demand-backup-physical-sharded-4995 mongodb '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-mongos.demand-backup-physical-sharded-4995 + local driver=mongodb + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.2fmClGlKGO +++ mktemp ++ local LAST_ERR=/tmp/tmp.YFnBtyFoZ6 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.2fmClGlKGO ++ cat /tmp/tmp.YFnBtyFoZ6 ++ rm /tmp/tmp.2fmClGlKGO /tmp/tmp.YFnBtyFoZ6 ++ return 0 + local client_container=psmdb-client-5f578b7f94-m7mqs + local mongo_flag= + kubectl_bin exec psmdb-client-5f578b7f94-m7mqs -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.demand-backup-physical-sharded-4995.svc.cluster.local/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.Tvnhr3G2Xk ++ mktemp + local LAST_ERR=/tmp/tmp.thu6waGLeh + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-5f578b7f94-m7mqs -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.demand-backup-physical-sharded-4995.svc.cluster.local/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Tvnhr3G2Xk + cat /tmp/tmp.thu6waGLeh + rm /tmp/tmp.Tvnhr3G2Xk /tmp/tmp.thu6waGLeh + return 0 + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1569/e2e-tests/demand-backup-physical-sharded/compare/find-sharded.json /tmp/tmp.mfU7C0j586/find-sharded + echo + set -o xtrace + check_exported_mongos_service_endpoint 34.45.38.207 + local host=34.45.38.207 ++ kubectl_bin get psmdb some-name '-o=jsonpath={.status.host}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.M7SCNVDb8R +++ mktemp ++ local LAST_ERR=/tmp/tmp.mQryofp0SJ ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name '-o=jsonpath={.status.host}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.M7SCNVDb8R ++ cat /tmp/tmp.mQryofp0SJ ++ rm /tmp/tmp.M7SCNVDb8R /tmp/tmp.mQryofp0SJ ++ return 0 + '[' 34.45.38.207 '!=' 
34.45.38.207 ']' + echo 'drop collection' drop collection + run_mongos 'use myApp\n db.test.drop()' myApp:myPass@some-name-mongos.demand-backup-physical-sharded-4995 + local 'command=use myApp\n db.test.drop()' + local uri=myApp:myPass@some-name-mongos.demand-backup-physical-sharded-4995 + local driver=mongodb + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.kA3Is7Il60 +++ mktemp ++ local LAST_ERR=/tmp/tmp.vnBXMouEOS ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.kA3Is7Il60 ++ cat /tmp/tmp.vnBXMouEOS ++ rm /tmp/tmp.kA3Is7Il60 /tmp/tmp.vnBXMouEOS ++ return 0 + local client_container=psmdb-client-5f578b7f94-m7mqs + local mongo_flag= + kubectl_bin exec psmdb-client-5f578b7f94-m7mqs -- bash -c 'printf '\''use myApp\n db.test.drop()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.demand-backup-physical-sharded-4995.svc.cluster.local/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.CS7PlrfmxF ++ mktemp + local LAST_ERR=/tmp/tmp.MmkmS2jDOL + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-5f578b7f94-m7mqs -- bash -c 'printf '\''use myApp\n db.test.drop()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.demand-backup-physical-sharded-4995.svc.cluster.local/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.CS7PlrfmxF Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-mongos.demand-backup-physical-sharded-4995.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb Implicit session: session { "id" : UUID("d5ecc8dc-784e-492d-89af-1f5cdbefe8be") } Percona Server for MongoDB server version: v7.0.11-6 WARNING: shell and server versions do not match switched to db myApp true bye + cat /tmp/tmp.MmkmS2jDOL + rm /tmp/tmp.CS7PlrfmxF /tmp/tmp.MmkmS2jDOL + return 0 + echo 'check backup and restore -- azure-blob' check backup and restore -- azure-blob + run_restore backup-azure-blob-sharded _restore_sharded + local backup_name=backup-azure-blob-sharded + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1569/e2e-tests/demand-backup-physical-sharded/conf/restore.yml + /usr/bin/sed -e 's/name:/name: restore-backup-azure-blob-sharded/' + /usr/bin/sed -e 's/backupName:/backupName: backup-azure-blob-sharded/' + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.AcgAmR53n7 ++ mktemp + local LAST_ERR=/tmp/tmp.rF3pMiPfSw + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.AcgAmR53n7 perconaservermongodbrestore.psmdb.percona.com/restore-backup-azure-blob-sharded created + cat /tmp/tmp.rF3pMiPfSw + rm /tmp/tmp.AcgAmR53n7 /tmp/tmp.rF3pMiPfSw + return 0 + run_recovery_check backup-azure-blob-sharded _restore_sharded + local backup_name=backup-azure-blob-sharded + local compare_suffix=_restore_sharded + wait_restore backup-azure-blob-sharded some-name requested 0 1200 + local backup_name=backup-azure-blob-sharded + local cluster_name=some-name + local target_state=requested + local wait_cluster_consistency=0 + local wait_time=1200 + set +o xtrace waiting psmdb-restore/backup-azure-blob-sharded to reach requested 
state........................................................................................................................... + '[' 0 -eq 1 ']' + echo + compare_kubectl statefulset/some-name-rs0 _restore_sharded + local resource=statefulset/some-name-rs0 + local postfix=_restore_sharded + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1569/e2e-tests/demand-backup-physical-sharded/compare/statefulset_some-name-rs0_restore_sharded.yml + local new_result=/tmp/tmp.mfU7C0j586/statefulset_some-name-rs0.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1569/e2e-tests/demand-backup-physical-sharded/compare/statefulset_some-name-rs0_restore_sharded-oc.yml ']' + kubectl_bin get -o yaml statefulset/some-name-rs0 + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("demand-backup-physical-sharded-4995", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. 
== "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.xfseOfTNzc ++ mktemp + local LAST_ERR=/tmp/tmp.kzmOTFE7Qi + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get -o yaml statefulset/some-name-rs0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.xfseOfTNzc + cat /tmp/tmp.kzmOTFE7Qi + rm /tmp/tmp.xfseOfTNzc /tmp/tmp.kzmOTFE7Qi + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.mfU7C0j586/statefulset_some-name-rs0.yml + version_gt 1.22 ++ echo '1.26 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.mfU7C0j586/statefulset_some-name-rs0.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.mfU7C0j586/statefulset_some-name-rs0.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1569/e2e-tests/demand-backup-physical-sharded/compare/statefulset_some-name-rs0_restore_sharded.yml == */cronjob* ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1569/e2e-tests/demand-backup-physical-sharded/compare/statefulset_some-name-rs0_restore_sharded.yml /tmp/tmp.mfU7C0j586/statefulset_some-name-rs0.yml + wait_restore backup-azure-blob-sharded some-name ready 0 1800 + local backup_name=backup-azure-blob-sharded + local cluster_name=some-name + local target_state=ready + local wait_cluster_consistency=0 + local wait_time=1800 + set +o xtrace waiting psmdb-restore/backup-azure-blob-sharded to reach ready state..................................................................................................... + '[' 0 -eq 1 ']' + kubectl_bin get psmdb some-name -o yaml ++ mktemp + local LAST_OUT=/tmp/tmp.YOiaNtXdGd ++ mktemp + local LAST_ERR=/tmp/tmp.YqY8IjR5dX + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get psmdb some-name -o yaml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.YOiaNtXdGd apiVersion: psmdb.percona.com/v1 kind: PerconaServerMongoDB metadata: annotations: kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"psmdb.percona.com/v1","kind":"PerconaServerMongoDB","metadata":{"annotations":{},"name":"some-name","namespace":"demand-backup-physical-sharded-4995"},"spec":{"backup":{"enabled":true,"image":"perconalab/percona-server-mongodb-operator:main-backup","storages":{"aws-s3":{"s3":{"bucket":"operator-testing","credentialsSecret":"aws-s3-secret","insecureSkipTLSVerify":false,"prefix":"psmdb-demand-backup-physical-sharded","region":"us-east-1"},"type":"s3"},"azure-blob":{"azure":{"container":"operator-testing","credentialsSecret":"azure-secret","prefix":"psmdb-demand-backup-physical-sharded"},"type":"azure"},"gcp-cs":{"s3":{"bucket":"operator-testing","credentialsSecret":"gcp-cs-secret","endpointUrl":"https://storage.googleapis.com","insecureSkipTLSVerify":false,"prefix":"psmdb-demand-backup-physical-sharded","region":"us-east-1"},"type":"s3"},"minio":{"s3":{"bucket":"operator-testing","credentialsSecret":"minio-secret","endpointUrl":"http://minio-service:9000/","insecureSkipTLSVerify":false,"region":"us-east-1"},"type":"s3"}},"tasks":[{"compressionType":"gzip","enabled":true,"name":"weekly","schedule":"0 0 * * 0","storageName":"aws-s3"}]},"image":"perconalab/percona-server-mongodb-operator:main-mongod7.0","imagePullPolicy":"Always","replsets":[{"affinity":{"antiAffinityTopologyKey":"none"},"configuration":"operationProfiling:\n mode: slowOp\n slowOpThresholdMs: 100\nsecurity:\n enableEncryption: true\n 
redactClientLogData: false\nsetParameter:\n ttlMonitorSleepSecs: 60\n wiredTigerConcurrentReadTransactions: 128\n wiredTigerConcurrentWriteTransactions: 128\nstorage:\n engine: wiredTiger\n wiredTiger:\n collectionConfig:\n blockCompressor: snappy\n engineConfig:\n directoryForIndexes: false\n journalCompressor: snappy\n indexConfig:\n prefixCompression: true\n","expose":{"enabled":false,"exposeType":"ClusterIP"},"name":"rs0","resources":{"limits":{"cpu":"500m","memory":"1G"},"requests":{"cpu":"100m","memory":"0.1G"}},"size":3,"volumeSpec":{"persistentVolumeClaim":{"resources":{"requests":{"storage":"3Gi"}}}}}],"secrets":{"users":"some-users"},"sharding":{"configsvrReplSet":{"affinity":{"antiAffinityTopologyKey":"none"},"configuration":"operationProfiling:\n mode: slowOp\n slowOpThresholdMs: 100\nsecurity:\n enableEncryption: true\n redactClientLogData: false\nsetParameter:\n ttlMonitorSleepSecs: 60\n wiredTigerConcurrentReadTransactions: 128\n wiredTigerConcurrentWriteTransactions: 128\nstorage:\n engine: wiredTiger\n wiredTiger:\n collectionConfig:\n blockCompressor: snappy\n engineConfig:\n directoryForIndexes: false\n journalCompressor: snappy\n indexConfig:\n prefixCompression: true\n","expose":{"enabled":false,"exposeType":"ClusterIP"},"resources":{"limits":{"cpu":"500m","memory":"1G"},"requests":{"cpu":"100m","memory":"0.1G"}},"size":3,"volumeSpec":{"persistentVolumeClaim":{"resources":{"requests":{"storage":"3Gi"}}}}},"enabled":true,"mongos":{"affinity":{"antiAffinityTopologyKey":"none"},"expose":{"exposeType":"LoadBalancer"},"resources":{"limits":{"cpu":"500m","memory":"1G"},"requests":{"cpu":"100m","memory":"0.1G"}},"size":3}},"upgradeOptions":{"apply":"Never"}}} percona.com/resync-pbm: "true" creationTimestamp: "2024-06-10T14:17:56Z" generation: 2 name: some-name namespace: demand-backup-physical-sharded-4995 resourceVersion: "29203" uid: 34d37458-02e9-4004-b8b2-97fe3b42d62a spec: backup: enabled: true image: perconalab/percona-server-mongodb-operator:main-backup storages: aws-s3: s3: bucket: operator-testing credentialsSecret: aws-s3-secret insecureSkipTLSVerify: false prefix: psmdb-demand-backup-physical-sharded region: us-east-1 type: s3 azure-blob: azure: container: operator-testing credentialsSecret: azure-secret prefix: psmdb-demand-backup-physical-sharded type: azure gcp-cs: s3: bucket: operator-testing credentialsSecret: gcp-cs-secret endpointUrl: https://storage.googleapis.com insecureSkipTLSVerify: false prefix: psmdb-demand-backup-physical-sharded region: us-east-1 type: s3 minio: s3: bucket: operator-testing credentialsSecret: minio-secret endpointUrl: http://minio-service:9000/ insecureSkipTLSVerify: false region: us-east-1 type: s3 tasks: - compressionType: gzip enabled: true name: weekly schedule: 0 0 * * 0 storageName: aws-s3 crVersion: 1.17.0 image: perconalab/percona-server-mongodb-operator:main-mongod7.0 imagePullPolicy: Always replsets: - affinity: antiAffinityTopologyKey: none configuration: | operationProfiling: mode: slowOp slowOpThresholdMs: 100 security: enableEncryption: true redactClientLogData: false setParameter: ttlMonitorSleepSecs: 60 wiredTigerConcurrentReadTransactions: 128 wiredTigerConcurrentWriteTransactions: 128 storage: engine: wiredTiger wiredTiger: collectionConfig: blockCompressor: snappy engineConfig: directoryForIndexes: false journalCompressor: snappy indexConfig: prefixCompression: true expose: enabled: false exposeType: ClusterIP name: rs0 resources: limits: cpu: 500m memory: 1G requests: cpu: 100m memory: 0.1G size: 3 volumeSpec: 
persistentVolumeClaim: resources: requests: storage: 3Gi secrets: users: some-users sharding: configsvrReplSet: affinity: antiAffinityTopologyKey: none configuration: | operationProfiling: mode: slowOp slowOpThresholdMs: 100 security: enableEncryption: true redactClientLogData: false setParameter: ttlMonitorSleepSecs: 60 wiredTigerConcurrentReadTransactions: 128 wiredTigerConcurrentWriteTransactions: 128 storage: engine: wiredTiger wiredTiger: collectionConfig: blockCompressor: snappy engineConfig: directoryForIndexes: false journalCompressor: snappy indexConfig: prefixCompression: true expose: enabled: false exposeType: ClusterIP resources: limits: cpu: 500m memory: 1G requests: cpu: 100m memory: 0.1G size: 3 volumeSpec: persistentVolumeClaim: resources: requests: storage: 3Gi enabled: true mongos: affinity: antiAffinityTopologyKey: none expose: exposeType: LoadBalancer resources: limits: cpu: 500m memory: 1G requests: cpu: 100m memory: 0.1G size: 3 upgradeOptions: apply: Never status: conditions: - lastTransitionTime: "2024-06-10T14:37:50Z" message: 'rs0: ready' reason: RSReady status: "True" type: ready - lastTransitionTime: "2024-06-10T14:37:50Z" status: "True" type: initializing - lastTransitionTime: "2024-06-10T14:38:03Z" message: 'cfg: ready' reason: RSReady status: "True" type: ready - lastTransitionTime: "2024-06-10T14:38:03Z" status: "True" type: initializing - lastTransitionTime: "2024-06-10T14:45:42Z" message: 'cfg: ready' reason: RSReady status: "True" type: ready - lastTransitionTime: "2024-06-10T14:45:42Z" status: "True" type: initializing - lastTransitionTime: "2024-06-10T14:46:07Z" message: 'rs0: ready' reason: RSReady status: "True" type: ready - lastTransitionTime: "2024-06-10T14:46:07Z" status: "True" type: initializing - lastTransitionTime: "2024-06-10T14:46:20Z" reason: MongosReady status: "True" type: ready - lastTransitionTime: "2024-06-10T14:46:29Z" status: "True" type: initializing - lastTransitionTime: "2024-06-10T14:47:09Z" message: 'rs0: ready' reason: RSReady status: "True" type: ready - lastTransitionTime: "2024-06-10T14:47:09Z" status: "True" type: initializing - lastTransitionTime: "2024-06-10T14:47:42Z" message: 'cfg: ready' reason: RSReady status: "True" type: ready - lastTransitionTime: "2024-06-10T14:47:42Z" status: "True" type: initializing - lastTransitionTime: "2024-06-10T14:47:56Z" message: 'rs0: ready' reason: RSReady status: "True" type: ready - lastTransitionTime: "2024-06-10T14:47:56Z" status: "True" type: initializing - lastTransitionTime: "2024-06-10T14:48:11Z" message: 'cfg: ready' reason: RSReady status: "True" type: ready - lastTransitionTime: "2024-06-10T14:48:10Z" status: "True" type: initializing - lastTransitionTime: "2024-06-10T14:48:35Z" message: 'rs0: ready' reason: RSReady status: "True" type: ready - lastTransitionTime: "2024-06-10T14:48:35Z" status: "True" type: initializing host: 34.45.38.207 mongoImage: perconalab/percona-server-mongodb-operator:main-mongod7.0 mongoVersion: 7.0.11-6 mongos: ready: 0 size: 0 status: initializing observedGeneration: 2 ready: 6 replsets: cfg: initialized: true ready: 3 size: 3 status: ready rs0: added_as_shard: true initialized: true ready: 3 size: 3 status: ready size: 6 state: initializing + cat /tmp/tmp.YqY8IjR5dX + rm /tmp/tmp.YOiaNtXdGd /tmp/tmp.YqY8IjR5dX + return 0 ++ kubectl_bin get psmdb some-name -o yaml ++ yq '.metadata.annotations."percona.com/resync-pbm"' +++ mktemp ++ local LAST_OUT=/tmp/tmp.7vogPwovGA +++ mktemp ++ local LAST_ERR=/tmp/tmp.De1u32um71 ++ local exit_status=0 ++ local 
timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o yaml ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.7vogPwovGA ++ cat /tmp/tmp.De1u32um71 ++ rm /tmp/tmp.7vogPwovGA /tmp/tmp.De1u32um71 ++ return 0 + '[' true == null ']' + echo + wait_cluster_consistency some-name + local cluster_name=some-name + local wait_time=32 + retry=0 + sleep 7 + echo -n 'waiting for cluster readyness' waiting for cluster readyness++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.afwVd2ruPx +++ mktemp ++ local LAST_ERR=/tmp/tmp.IO2eojbUTe ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.afwVd2ruPx ++ cat /tmp/tmp.IO2eojbUTe ++ rm /tmp/tmp.afwVd2ruPx /tmp/tmp.IO2eojbUTe ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 1 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.vS6aMQVjJl +++ mktemp ++ local LAST_ERR=/tmp/tmp.Htbb998odB ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.vS6aMQVjJl ++ cat /tmp/tmp.Htbb998odB ++ rm /tmp/tmp.vS6aMQVjJl /tmp/tmp.Htbb998odB ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 2 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.BFI56HL5N7 +++ mktemp ++ local LAST_ERR=/tmp/tmp.Zitk1C44E8 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.BFI56HL5N7 ++ cat /tmp/tmp.Zitk1C44E8 ++ rm /tmp/tmp.BFI56HL5N7 /tmp/tmp.Zitk1C44E8 ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 3 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.baXSvqseYn +++ mktemp ++ local LAST_ERR=/tmp/tmp.9A68qhY845 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.baXSvqseYn ++ cat /tmp/tmp.9A68qhY845 ++ rm /tmp/tmp.baXSvqseYn /tmp/tmp.9A68qhY845 ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 4 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.oiE2Ey1965 +++ mktemp ++ local LAST_ERR=/tmp/tmp.y5uZ3NsS99 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.oiE2Ey1965 ++ cat /tmp/tmp.y5uZ3NsS99 ++ rm /tmp/tmp.oiE2Ey1965 /tmp/tmp.y5uZ3NsS99 ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 5 -ge 32 ']' + echo -n . 
.+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.kRxJmp9OQs +++ mktemp ++ local LAST_ERR=/tmp/tmp.FUqdxAJE4x ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.kRxJmp9OQs ++ cat /tmp/tmp.FUqdxAJE4x ++ rm /tmp/tmp.kRxJmp9OQs /tmp/tmp.FUqdxAJE4x ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 6 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.PR00LtP1OL +++ mktemp ++ local LAST_ERR=/tmp/tmp.k44f3bUNNt ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.PR00LtP1OL ++ cat /tmp/tmp.k44f3bUNNt ++ rm /tmp/tmp.PR00LtP1OL /tmp/tmp.k44f3bUNNt ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 7 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ofqKWrHI4L +++ mktemp ++ local LAST_ERR=/tmp/tmp.tgcV35pymb ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.ofqKWrHI4L ++ cat /tmp/tmp.tgcV35pymb ++ rm /tmp/tmp.ofqKWrHI4L /tmp/tmp.tgcV35pymb ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 8 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.5ssHeSBpyY +++ mktemp ++ local LAST_ERR=/tmp/tmp.VE1PNAiRv1 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.5ssHeSBpyY ++ cat /tmp/tmp.VE1PNAiRv1 ++ rm /tmp/tmp.5ssHeSBpyY /tmp/tmp.VE1PNAiRv1 ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 9 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.hc0WZ14hof +++ mktemp ++ local LAST_ERR=/tmp/tmp.Yw2adJdpYu ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.hc0WZ14hof ++ cat /tmp/tmp.Yw2adJdpYu ++ rm /tmp/tmp.hc0WZ14hof /tmp/tmp.Yw2adJdpYu ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 10 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.KFgwIm5DAS +++ mktemp ++ local LAST_ERR=/tmp/tmp.Q64xEvvl47 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.KFgwIm5DAS ++ cat /tmp/tmp.Q64xEvvl47 ++ rm /tmp/tmp.KFgwIm5DAS /tmp/tmp.Q64xEvvl47 ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 11 -ge 32 ']' + echo -n . 
.+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.0ZfAvAAkd6 +++ mktemp ++ local LAST_ERR=/tmp/tmp.R5g9AJjpiV ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.0ZfAvAAkd6 ++ cat /tmp/tmp.R5g9AJjpiV ++ rm /tmp/tmp.0ZfAvAAkd6 /tmp/tmp.R5g9AJjpiV ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 12 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.4vNU40jeh2 +++ mktemp ++ local LAST_ERR=/tmp/tmp.1QdjuV1gPJ ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.4vNU40jeh2 ++ cat /tmp/tmp.1QdjuV1gPJ ++ rm /tmp/tmp.4vNU40jeh2 /tmp/tmp.1QdjuV1gPJ ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 13 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.VwBNSlbiCo +++ mktemp ++ local LAST_ERR=/tmp/tmp.gmvaSsSd2i ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.VwBNSlbiCo ++ cat /tmp/tmp.gmvaSsSd2i ++ rm /tmp/tmp.VwBNSlbiCo /tmp/tmp.gmvaSsSd2i ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 14 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.y3aAu4ZFJQ +++ mktemp ++ local LAST_ERR=/tmp/tmp.Q3cxr9bAHi ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.y3aAu4ZFJQ ++ cat /tmp/tmp.Q3cxr9bAHi ++ rm /tmp/tmp.y3aAu4ZFJQ /tmp/tmp.Q3cxr9bAHi ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 15 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.m5KsbyICWa +++ mktemp ++ local LAST_ERR=/tmp/tmp.0SsehpfZ2e ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.m5KsbyICWa ++ cat /tmp/tmp.0SsehpfZ2e ++ rm /tmp/tmp.m5KsbyICWa /tmp/tmp.0SsehpfZ2e ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 16 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.wLrICudgV7 +++ mktemp ++ local LAST_ERR=/tmp/tmp.9LNuLx9zks ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.wLrICudgV7 ++ cat /tmp/tmp.9LNuLx9zks ++ rm /tmp/tmp.wLrICudgV7 /tmp/tmp.9LNuLx9zks ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 17 -ge 32 ']' + echo -n . 
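The stretch above is the suite's `wait_cluster_consistency` helper polling `.status.state` once every ten seconds and giving up after 32 attempts. A minimal standalone sketch of that polling pattern, assuming only `kubectl` in the current context and a PSMDB resource named `some-name` (the `kubectl_bin` temp-file plumbing visible in the trace is omitted here):

```bash
#!/bin/bash
# Poll the PSMDB custom resource until .status.state reports "ready".
# Mirrors the loop in the trace: up to 32 attempts, 10 seconds apart.
set -o errexit

cluster_name=${1:-some-name}   # hypothetical positional argument
max_retries=32
retry=0

sleep 7   # initial grace period, as in the trace
echo -n "waiting for cluster readiness"
until [[ "$(kubectl get psmdb "${cluster_name}" \
        -o 'jsonpath={.status.state}')" == "ready" ]]; do
    retry=$((retry + 1))
    if [ "${retry}" -ge "${max_retries}" ]; then
        echo "cluster not ready after ${max_retries} attempts" >&2
        exit 1
    fi
    echo -n .
    sleep 10
done
echo " ok"
```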
.+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Swfi7pfW4d +++ mktemp ++ local LAST_ERR=/tmp/tmp.nCPn3scml3 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Swfi7pfW4d ++ cat /tmp/tmp.nCPn3scml3 ++ rm /tmp/tmp.Swfi7pfW4d /tmp/tmp.nCPn3scml3 ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 18 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.t4YblRGWB9 +++ mktemp ++ local LAST_ERR=/tmp/tmp.CEgieLx1Fq ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.t4YblRGWB9 ++ cat /tmp/tmp.CEgieLx1Fq ++ rm /tmp/tmp.t4YblRGWB9 /tmp/tmp.CEgieLx1Fq ++ return 0 + [[ ready == \r\e\a\d\y ]] + compare_mongos_cmd find myApp:myPass@some-name-mongos.demand-backup-physical-sharded-4995 -sharded + local command=find + local uri=myApp:myPass@some-name-mongos.demand-backup-physical-sharded-4995 + local postfix=-sharded + local suffix= + local database=myApp + local collection=test + run_mongos 'use myApp\n db.test.find()' myApp:myPass@some-name-mongos.demand-backup-physical-sharded-4995 mongodb '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-mongos.demand-backup-physical-sharded-4995 + local driver=mongodb + local suffix=.svc.cluster.local + egrep -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.dOhyN4t7H4 +++ mktemp ++ local LAST_ERR=/tmp/tmp.qAIN7L2SRv ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.dOhyN4t7H4 ++ cat /tmp/tmp.qAIN7L2SRv ++ rm /tmp/tmp.dOhyN4t7H4 /tmp/tmp.qAIN7L2SRv ++ return 0 + local client_container=psmdb-client-5f578b7f94-m7mqs + local mongo_flag= + kubectl_bin exec psmdb-client-5f578b7f94-m7mqs -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.demand-backup-physical-sharded-4995.svc.cluster.local/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.4jgpeFyo3T ++ mktemp + local LAST_ERR=/tmp/tmp.c4DPnAOkxw + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-5f578b7f94-m7mqs -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.demand-backup-physical-sharded-4995.svc.cluster.local/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.4jgpeFyo3T + cat /tmp/tmp.c4DPnAOkxw + rm /tmp/tmp.4jgpeFyo3T /tmp/tmp.c4DPnAOkxw + return 0 + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1569/e2e-tests/demand-backup-physical-sharded/compare/find-sharded.json /tmp/tmp.mfU7C0j586/find-sharded + echo + set -o xtrace + 
check_exported_mongos_service_endpoint 34.45.38.207 + local host=34.45.38.207 ++ kubectl_bin get psmdb some-name '-o=jsonpath={.status.host}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ujag8MmZG0 +++ mktemp ++ local LAST_ERR=/tmp/tmp.erhlxdGF6N ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name '-o=jsonpath={.status.host}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.ujag8MmZG0 ++ cat /tmp/tmp.erhlxdGF6N ++ rm /tmp/tmp.ujag8MmZG0 /tmp/tmp.erhlxdGF6N ++ return 0 + '[' 34.45.38.207 '!=' 34.45.38.207 ']' + echo 'drop collection' drop collection + run_mongos 'use myApp\n db.test.drop()' myApp:myPass@some-name-mongos.demand-backup-physical-sharded-4995 + local 'command=use myApp\n db.test.drop()' + local uri=myApp:myPass@some-name-mongos.demand-backup-physical-sharded-4995 + local driver=mongodb + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.yBG9lpKEti +++ mktemp ++ local LAST_ERR=/tmp/tmp.X73aErlbvO ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.yBG9lpKEti ++ cat /tmp/tmp.X73aErlbvO ++ rm /tmp/tmp.yBG9lpKEti /tmp/tmp.X73aErlbvO ++ return 0 + local client_container=psmdb-client-5f578b7f94-m7mqs + local mongo_flag= + kubectl_bin exec psmdb-client-5f578b7f94-m7mqs -- bash -c 'printf '\''use myApp\n db.test.drop()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.demand-backup-physical-sharded-4995.svc.cluster.local/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.FFRXb2jKcd ++ mktemp + local LAST_ERR=/tmp/tmp.aWNpokdsMK + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-5f578b7f94-m7mqs -- bash -c 'printf '\''use myApp\n db.test.drop()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.demand-backup-physical-sharded-4995.svc.cluster.local/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.FFRXb2jKcd Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-mongos.demand-backup-physical-sharded-4995.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb Implicit session: session { "id" : UUID("bfa08fa2-f5a2-47e5-a8ee-18d8eef6e07a") } Percona Server for MongoDB server version: v7.0.11-6 WARNING: shell and server versions do not match switched to db myApp true bye + cat /tmp/tmp.aWNpokdsMK + rm /tmp/tmp.FFRXb2jKcd /tmp/tmp.aWNpokdsMK + return 0 + echo 'check backup and restore -- minio' check backup and restore -- minio ++ get_backup_dest backup-minio-sharded ++ local backup_name=backup-minio-sharded ++ kubectl_bin get psmdb-backup backup-minio-sharded -o 'jsonpath={.status.destination}' ++ sed -e 's/.json$//' ++ sed 's|s3://||' ++ sed 's|azure://||' +++ mktemp ++ local LAST_OUT=/tmp/tmp.69XhQw5XQF +++ mktemp ++ local LAST_ERR=/tmp/tmp.aiWe7dEviG ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb-backup backup-minio-sharded -o 'jsonpath={.status.destination}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.69XhQw5XQF ++ cat /tmp/tmp.aiWe7dEviG ++ rm /tmp/tmp.69XhQw5XQF /tmp/tmp.aiWe7dEviG ++ return 0 + 
backup_dest_minio=operator-testing/2024-06-10T14:22:45Z + run_restore backup-minio-sharded _restore_sharded + local backup_name=backup-minio-sharded + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1569/e2e-tests/demand-backup-physical-sharded/conf/restore.yml + /usr/bin/sed -e 's/name:/name: restore-backup-minio-sharded/' + /usr/bin/sed -e 's/backupName:/backupName: backup-minio-sharded/' + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.ZpCfkQC2qn ++ mktemp + local LAST_ERR=/tmp/tmp.e6eff6anJH + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.ZpCfkQC2qn perconaservermongodbrestore.psmdb.percona.com/restore-backup-minio-sharded created + cat /tmp/tmp.e6eff6anJH + rm /tmp/tmp.ZpCfkQC2qn /tmp/tmp.e6eff6anJH + return 0 + run_recovery_check backup-minio-sharded _restore_sharded + local backup_name=backup-minio-sharded + local compare_suffix=_restore_sharded + wait_restore backup-minio-sharded some-name requested 0 1200 + local backup_name=backup-minio-sharded + local cluster_name=some-name + local target_state=requested + local wait_cluster_consistency=0 + local wait_time=1200 + set +o xtrace waiting psmdb-restore/backup-minio-sharded to reach requested state..................................................................................... + '[' 0 -eq 1 ']' + echo + compare_kubectl statefulset/some-name-rs0 _restore_sharded + local resource=statefulset/some-name-rs0 + local postfix=_restore_sharded + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1569/e2e-tests/demand-backup-physical-sharded/compare/statefulset_some-name-rs0_restore_sharded.yml + local new_result=/tmp/tmp.mfU7C0j586/statefulset_some-name-rs0.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1569/e2e-tests/demand-backup-physical-sharded/compare/statefulset_some-name-rs0_restore_sharded-oc.yml ']' + kubectl_bin get -o yaml statefulset/some-name-rs0 + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. 
| select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("demand-backup-physical-sharded-4995", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | ++ mktemp (.. | select(. == "batch/v1beta1")) = "batch/v1" ' - + local LAST_OUT=/tmp/tmp.ErcPIfdRRt ++ mktemp + local LAST_ERR=/tmp/tmp.KGqxbVAqV5 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get -o yaml statefulset/some-name-rs0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.ErcPIfdRRt + cat /tmp/tmp.KGqxbVAqV5 + rm /tmp/tmp.ErcPIfdRRt /tmp/tmp.KGqxbVAqV5 + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.mfU7C0j586/statefulset_some-name-rs0.yml + version_gt 1.22 ++ echo '1.26 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.mfU7C0j586/statefulset_some-name-rs0.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.mfU7C0j586/statefulset_some-name-rs0.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1569/e2e-tests/demand-backup-physical-sharded/compare/statefulset_some-name-rs0_restore_sharded.yml == */cronjob* ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1569/e2e-tests/demand-backup-physical-sharded/compare/statefulset_some-name-rs0_restore_sharded.yml /tmp/tmp.mfU7C0j586/statefulset_some-name-rs0.yml + wait_restore backup-minio-sharded some-name ready 0 1800 + local backup_name=backup-minio-sharded + local cluster_name=some-name + local target_state=ready + local wait_cluster_consistency=0 + local wait_time=1800 + set +o xtrace waiting psmdb-restore/backup-minio-sharded to reach ready state.................................................................................................... 
+ '[' 0 -eq 1 ']' + kubectl_bin get psmdb some-name -o yaml ++ mktemp + local LAST_OUT=/tmp/tmp.nU13fZehrA ++ mktemp + local LAST_ERR=/tmp/tmp.RvC84QksFB + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get psmdb some-name -o yaml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.nU13fZehrA apiVersion: psmdb.percona.com/v1 kind: PerconaServerMongoDB metadata: annotations: kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"psmdb.percona.com/v1","kind":"PerconaServerMongoDB","metadata":{"annotations":{},"name":"some-name","namespace":"demand-backup-physical-sharded-4995"},"spec":{"backup":{"enabled":true,"image":"perconalab/percona-server-mongodb-operator:main-backup","storages":{"aws-s3":{"s3":{"bucket":"operator-testing","credentialsSecret":"aws-s3-secret","insecureSkipTLSVerify":false,"prefix":"psmdb-demand-backup-physical-sharded","region":"us-east-1"},"type":"s3"},"azure-blob":{"azure":{"container":"operator-testing","credentialsSecret":"azure-secret","prefix":"psmdb-demand-backup-physical-sharded"},"type":"azure"},"gcp-cs":{"s3":{"bucket":"operator-testing","credentialsSecret":"gcp-cs-secret","endpointUrl":"https://storage.googleapis.com","insecureSkipTLSVerify":false,"prefix":"psmdb-demand-backup-physical-sharded","region":"us-east-1"},"type":"s3"},"minio":{"s3":{"bucket":"operator-testing","credentialsSecret":"minio-secret","endpointUrl":"http://minio-service:9000/","insecureSkipTLSVerify":false,"region":"us-east-1"},"type":"s3"}},"tasks":[{"compressionType":"gzip","enabled":true,"name":"weekly","schedule":"0 0 * * 0","storageName":"aws-s3"}]},"image":"perconalab/percona-server-mongodb-operator:main-mongod7.0","imagePullPolicy":"Always","replsets":[{"affinity":{"antiAffinityTopologyKey":"none"},"configuration":"operationProfiling:\n mode: slowOp\n slowOpThresholdMs: 100\nsecurity:\n enableEncryption: true\n redactClientLogData: false\nsetParameter:\n ttlMonitorSleepSecs: 60\n wiredTigerConcurrentReadTransactions: 128\n wiredTigerConcurrentWriteTransactions: 128\nstorage:\n engine: wiredTiger\n wiredTiger:\n collectionConfig:\n blockCompressor: snappy\n engineConfig:\n directoryForIndexes: false\n journalCompressor: snappy\n indexConfig:\n prefixCompression: true\n","expose":{"enabled":false,"exposeType":"ClusterIP"},"name":"rs0","resources":{"limits":{"cpu":"500m","memory":"1G"},"requests":{"cpu":"100m","memory":"0.1G"}},"size":3,"volumeSpec":{"persistentVolumeClaim":{"resources":{"requests":{"storage":"3Gi"}}}}}],"secrets":{"users":"some-users"},"sharding":{"configsvrReplSet":{"affinity":{"antiAffinityTopologyKey":"none"},"configuration":"operationProfiling:\n mode: slowOp\n slowOpThresholdMs: 100\nsecurity:\n enableEncryption: true\n redactClientLogData: false\nsetParameter:\n ttlMonitorSleepSecs: 60\n wiredTigerConcurrentReadTransactions: 128\n wiredTigerConcurrentWriteTransactions: 128\nstorage:\n engine: wiredTiger\n wiredTiger:\n collectionConfig:\n blockCompressor: snappy\n engineConfig:\n directoryForIndexes: false\n journalCompressor: snappy\n indexConfig:\n prefixCompression: 
true\n","expose":{"enabled":false,"exposeType":"ClusterIP"},"resources":{"limits":{"cpu":"500m","memory":"1G"},"requests":{"cpu":"100m","memory":"0.1G"}},"size":3,"volumeSpec":{"persistentVolumeClaim":{"resources":{"requests":{"storage":"3Gi"}}}}},"enabled":true,"mongos":{"affinity":{"antiAffinityTopologyKey":"none"},"expose":{"exposeType":"LoadBalancer"},"resources":{"limits":{"cpu":"500m","memory":"1G"},"requests":{"cpu":"100m","memory":"0.1G"}},"size":3}},"upgradeOptions":{"apply":"Never"}}} percona.com/resync-pbm: "true" creationTimestamp: "2024-06-10T14:17:56Z" generation: 2 name: some-name namespace: demand-backup-physical-sharded-4995 resourceVersion: "33505" uid: 34d37458-02e9-4004-b8b2-97fe3b42d62a spec: backup: enabled: true image: perconalab/percona-server-mongodb-operator:main-backup storages: aws-s3: s3: bucket: operator-testing credentialsSecret: aws-s3-secret insecureSkipTLSVerify: false prefix: psmdb-demand-backup-physical-sharded region: us-east-1 type: s3 azure-blob: azure: container: operator-testing credentialsSecret: azure-secret prefix: psmdb-demand-backup-physical-sharded type: azure gcp-cs: s3: bucket: operator-testing credentialsSecret: gcp-cs-secret endpointUrl: https://storage.googleapis.com insecureSkipTLSVerify: false prefix: psmdb-demand-backup-physical-sharded region: us-east-1 type: s3 minio: s3: bucket: operator-testing credentialsSecret: minio-secret endpointUrl: http://minio-service:9000/ insecureSkipTLSVerify: false region: us-east-1 type: s3 tasks: - compressionType: gzip enabled: true name: weekly schedule: 0 0 * * 0 storageName: aws-s3 crVersion: 1.17.0 image: perconalab/percona-server-mongodb-operator:main-mongod7.0 imagePullPolicy: Always replsets: - affinity: antiAffinityTopologyKey: none configuration: | operationProfiling: mode: slowOp slowOpThresholdMs: 100 security: enableEncryption: true redactClientLogData: false setParameter: ttlMonitorSleepSecs: 60 wiredTigerConcurrentReadTransactions: 128 wiredTigerConcurrentWriteTransactions: 128 storage: engine: wiredTiger wiredTiger: collectionConfig: blockCompressor: snappy engineConfig: directoryForIndexes: false journalCompressor: snappy indexConfig: prefixCompression: true expose: enabled: false exposeType: ClusterIP name: rs0 resources: limits: cpu: 500m memory: 1G requests: cpu: 100m memory: 0.1G size: 3 volumeSpec: persistentVolumeClaim: resources: requests: storage: 3Gi secrets: users: some-users sharding: configsvrReplSet: affinity: antiAffinityTopologyKey: none configuration: | operationProfiling: mode: slowOp slowOpThresholdMs: 100 security: enableEncryption: true redactClientLogData: false setParameter: ttlMonitorSleepSecs: 60 wiredTigerConcurrentReadTransactions: 128 wiredTigerConcurrentWriteTransactions: 128 storage: engine: wiredTiger wiredTiger: collectionConfig: blockCompressor: snappy engineConfig: directoryForIndexes: false journalCompressor: snappy indexConfig: prefixCompression: true expose: enabled: false exposeType: ClusterIP resources: limits: cpu: 500m memory: 1G requests: cpu: 100m memory: 0.1G size: 3 volumeSpec: persistentVolumeClaim: resources: requests: storage: 3Gi enabled: true mongos: affinity: antiAffinityTopologyKey: none expose: exposeType: LoadBalancer resources: limits: cpu: 500m memory: 1G requests: cpu: 100m memory: 0.1G size: 3 upgradeOptions: apply: Never status: conditions: - lastTransitionTime: "2024-06-10T14:48:11Z" message: 'cfg: ready' reason: RSReady status: "True" type: ready - lastTransitionTime: "2024-06-10T14:48:10Z" status: "True" type: initializing - 
lastTransitionTime: "2024-06-10T14:48:35Z" message: 'rs0: ready' reason: RSReady status: "True" type: ready - lastTransitionTime: "2024-06-10T14:48:35Z" status: "True" type: initializing - lastTransitionTime: "2024-06-10T14:54:11Z" message: 'cfg: ready' reason: RSReady status: "True" type: ready - lastTransitionTime: "2024-06-10T14:54:11Z" status: "True" type: initializing - lastTransitionTime: "2024-06-10T14:54:33Z" message: 'rs0: ready' reason: RSReady status: "True" type: ready - lastTransitionTime: "2024-06-10T14:54:33Z" status: "True" type: initializing - lastTransitionTime: "2024-06-10T14:54:47Z" reason: MongosReady status: "True" type: ready - lastTransitionTime: "2024-06-10T14:55:04Z" status: "True" type: initializing - lastTransitionTime: "2024-06-10T14:55:37Z" message: 'rs0: ready' reason: RSReady status: "True" type: ready - lastTransitionTime: "2024-06-10T14:55:37Z" status: "True" type: initializing - lastTransitionTime: "2024-06-10T14:56:09Z" message: 'cfg: ready' reason: RSReady status: "True" type: ready - lastTransitionTime: "2024-06-10T14:56:09Z" status: "True" type: initializing - lastTransitionTime: "2024-06-10T14:56:11Z" message: 'rs0: ready' reason: RSReady status: "True" type: ready - lastTransitionTime: "2024-06-10T14:56:11Z" status: "True" type: initializing - lastTransitionTime: "2024-06-10T14:56:43Z" message: 'cfg: ready' reason: RSReady status: "True" type: ready - lastTransitionTime: "2024-06-10T14:56:43Z" status: "True" type: initializing - lastTransitionTime: "2024-06-10T14:56:56Z" message: 'rs0: ready' reason: RSReady status: "True" type: ready - lastTransitionTime: "2024-06-10T14:56:56Z" status: "True" type: initializing host: 34.45.38.207 mongoImage: perconalab/percona-server-mongodb-operator:main-mongod7.0 mongoVersion: 7.0.11-6 mongos: ready: 0 size: 0 status: initializing observedGeneration: 2 ready: 6 replsets: cfg: initialized: true ready: 3 size: 3 status: ready rs0: added_as_shard: true initialized: true ready: 3 size: 3 status: ready size: 6 state: initializing + cat /tmp/tmp.RvC84QksFB + rm /tmp/tmp.nU13fZehrA /tmp/tmp.RvC84QksFB + return 0 ++ kubectl_bin get psmdb some-name -o yaml ++ yq '.metadata.annotations."percona.com/resync-pbm"' +++ mktemp ++ local LAST_OUT=/tmp/tmp.HVHTdHezAk +++ mktemp ++ local LAST_ERR=/tmp/tmp.ZgxNp3lao9 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o yaml ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.HVHTdHezAk ++ cat /tmp/tmp.ZgxNp3lao9 ++ rm /tmp/tmp.HVHTdHezAk /tmp/tmp.ZgxNp3lao9 ++ return 0 + '[' true == null ']' + echo + wait_cluster_consistency some-name + local cluster_name=some-name + local wait_time=32 + retry=0 + sleep 7 + echo -n 'waiting for cluster readyness' waiting for cluster readyness++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.XI3BjC11Eb +++ mktemp ++ local LAST_ERR=/tmp/tmp.OOPvD6lFgg ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.XI3BjC11Eb ++ cat /tmp/tmp.OOPvD6lFgg ++ rm /tmp/tmp.XI3BjC11Eb /tmp/tmp.OOPvD6lFgg ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 1 -ge 32 ']' + echo -n . 
.+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.WvBQpjeUrs +++ mktemp ++ local LAST_ERR=/tmp/tmp.iyMUvZD9ga ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.WvBQpjeUrs ++ cat /tmp/tmp.iyMUvZD9ga ++ rm /tmp/tmp.WvBQpjeUrs /tmp/tmp.iyMUvZD9ga ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 2 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.2l1EfEjKHf +++ mktemp ++ local LAST_ERR=/tmp/tmp.0O0x0RxAKb ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.2l1EfEjKHf ++ cat /tmp/tmp.0O0x0RxAKb ++ rm /tmp/tmp.2l1EfEjKHf /tmp/tmp.0O0x0RxAKb ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 3 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.6vmfnzmNkU +++ mktemp ++ local LAST_ERR=/tmp/tmp.ur8Uqw427b ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.6vmfnzmNkU ++ cat /tmp/tmp.ur8Uqw427b ++ rm /tmp/tmp.6vmfnzmNkU /tmp/tmp.ur8Uqw427b ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 4 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.muIioxmIIw +++ mktemp ++ local LAST_ERR=/tmp/tmp.1jCT9AwNK4 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.muIioxmIIw ++ cat /tmp/tmp.1jCT9AwNK4 ++ rm /tmp/tmp.muIioxmIIw /tmp/tmp.1jCT9AwNK4 ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 5 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.N53F3CGTu1 +++ mktemp ++ local LAST_ERR=/tmp/tmp.QPKPR4JPFD ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.N53F3CGTu1 ++ cat /tmp/tmp.QPKPR4JPFD ++ rm /tmp/tmp.N53F3CGTu1 /tmp/tmp.QPKPR4JPFD ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 6 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.YCzvkLIrlu +++ mktemp ++ local LAST_ERR=/tmp/tmp.jGdQCCWhTI ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.YCzvkLIrlu ++ cat /tmp/tmp.jGdQCCWhTI ++ rm /tmp/tmp.YCzvkLIrlu /tmp/tmp.jGdQCCWhTI ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 7 -ge 32 ']' + echo -n . 
.+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.dopMuWlhbP +++ mktemp ++ local LAST_ERR=/tmp/tmp.5AMMt356ZI ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.dopMuWlhbP ++ cat /tmp/tmp.5AMMt356ZI ++ rm /tmp/tmp.dopMuWlhbP /tmp/tmp.5AMMt356ZI ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 8 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.FEuNc4WoJN +++ mktemp ++ local LAST_ERR=/tmp/tmp.12kUAuDDn6 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.FEuNc4WoJN ++ cat /tmp/tmp.12kUAuDDn6 ++ rm /tmp/tmp.FEuNc4WoJN /tmp/tmp.12kUAuDDn6 ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 9 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.oLzMmgcqAY +++ mktemp ++ local LAST_ERR=/tmp/tmp.jto6TfJzFz ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.oLzMmgcqAY ++ cat /tmp/tmp.jto6TfJzFz ++ rm /tmp/tmp.oLzMmgcqAY /tmp/tmp.jto6TfJzFz ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 10 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.8ovh9hG47c +++ mktemp ++ local LAST_ERR=/tmp/tmp.0OszBUrWmI ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.8ovh9hG47c ++ cat /tmp/tmp.0OszBUrWmI ++ rm /tmp/tmp.8ovh9hG47c /tmp/tmp.0OszBUrWmI ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 11 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.kDuyBLaHF1 +++ mktemp ++ local LAST_ERR=/tmp/tmp.no6YZsEZaq ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.kDuyBLaHF1 ++ cat /tmp/tmp.no6YZsEZaq ++ rm /tmp/tmp.kDuyBLaHF1 /tmp/tmp.no6YZsEZaq ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 12 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.QhrmqvvIqe +++ mktemp ++ local LAST_ERR=/tmp/tmp.XcShNCcyg9 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.QhrmqvvIqe ++ cat /tmp/tmp.XcShNCcyg9 ++ rm /tmp/tmp.QhrmqvvIqe /tmp/tmp.XcShNCcyg9 ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 13 -ge 32 ']' + echo -n . 
.+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.LGK0SefOaW +++ mktemp ++ local LAST_ERR=/tmp/tmp.F3TokFLKKz ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.LGK0SefOaW ++ cat /tmp/tmp.F3TokFLKKz ++ rm /tmp/tmp.LGK0SefOaW /tmp/tmp.F3TokFLKKz ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 14 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.uOlym7iXQn +++ mktemp ++ local LAST_ERR=/tmp/tmp.ahXXstgTC6 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.uOlym7iXQn ++ cat /tmp/tmp.ahXXstgTC6 ++ rm /tmp/tmp.uOlym7iXQn /tmp/tmp.ahXXstgTC6 ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 15 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.S1QoWud5D0 +++ mktemp ++ local LAST_ERR=/tmp/tmp.aCHFrtOZZR ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.S1QoWud5D0 ++ cat /tmp/tmp.aCHFrtOZZR ++ rm /tmp/tmp.S1QoWud5D0 /tmp/tmp.aCHFrtOZZR ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 16 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.LLcHX3g9sU +++ mktemp ++ local LAST_ERR=/tmp/tmp.CzwwX2NABW ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.LLcHX3g9sU ++ cat /tmp/tmp.CzwwX2NABW ++ rm /tmp/tmp.LLcHX3g9sU /tmp/tmp.CzwwX2NABW ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 17 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.97fzlRfhRG +++ mktemp ++ local LAST_ERR=/tmp/tmp.SAPScfGsAX ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.97fzlRfhRG ++ cat /tmp/tmp.SAPScfGsAX ++ rm /tmp/tmp.97fzlRfhRG /tmp/tmp.SAPScfGsAX ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 18 -ge 32 ']' + echo -n . 
.+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ZAyWudVQ5D +++ mktemp ++ local LAST_ERR=/tmp/tmp.ZmrYTnrwAz ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.ZAyWudVQ5D ++ cat /tmp/tmp.ZmrYTnrwAz ++ rm /tmp/tmp.ZAyWudVQ5D /tmp/tmp.ZmrYTnrwAz ++ return 0 + [[ ready == \r\e\a\d\y ]] + compare_mongos_cmd find myApp:myPass@some-name-mongos.demand-backup-physical-sharded-4995 -sharded + local command=find + local uri=myApp:myPass@some-name-mongos.demand-backup-physical-sharded-4995 + local postfix=-sharded + local suffix= + local database=myApp + local collection=test + run_mongos 'use myApp\n db.test.find()' myApp:myPass@some-name-mongos.demand-backup-physical-sharded-4995 mongodb '' + egrep -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-mongos.demand-backup-physical-sharded-4995 + local driver=mongodb + local suffix=.svc.cluster.local + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.K7chYH0Xmy +++ mktemp ++ local LAST_ERR=/tmp/tmp.XuiA7CD8Qg ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.K7chYH0Xmy ++ cat /tmp/tmp.XuiA7CD8Qg ++ rm /tmp/tmp.K7chYH0Xmy /tmp/tmp.XuiA7CD8Qg ++ return 0 + local client_container=psmdb-client-5f578b7f94-m7mqs + local mongo_flag= + kubectl_bin exec psmdb-client-5f578b7f94-m7mqs -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.demand-backup-physical-sharded-4995.svc.cluster.local/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.PS8CbpLkri ++ mktemp + local LAST_ERR=/tmp/tmp.sOLR63OEOe + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-5f578b7f94-m7mqs -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.demand-backup-physical-sharded-4995.svc.cluster.local/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.PS8CbpLkri + cat /tmp/tmp.sOLR63OEOe + rm /tmp/tmp.PS8CbpLkri /tmp/tmp.sOLR63OEOe + return 0 + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1569/e2e-tests/demand-backup-physical-sharded/compare/find-sharded.json /tmp/tmp.mfU7C0j586/find-sharded + echo + set -o xtrace + destroy demand-backup-physical-sharded-4995 + local namespace=demand-backup-physical-sharded-4995 + local ignore_logs=true + desc 'destroy cluster/operator and all other resources' + set +o xtrace ----------------------------------------------------------------------------------- destroy cluster/operator and all other resources ----------------------------------------------------------------------------------- + '[' true == false ']' + delete_crd + desc 'get and delete old CRDs and RBAC' + set +o xtrace 
-----------------------------------------------------------------------------------
get and delete old CRDs and RBAC
-----------------------------------------------------------------------------------
+ kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1569/deploy/crd.yaml --ignore-not-found --wait=false
++ mktemp
+ local LAST_OUT=/tmp/tmp.cOyqB7U1gp
++ mktemp
+ local LAST_ERR=/tmp/tmp.7dC2S70Ckc
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1569/deploy/crd.yaml --ignore-not-found --wait=false
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.cOyqB7U1gp
customresourcedefinition.apiextensions.k8s.io "perconaservermongodbbackups.psmdb.percona.com" deleted
customresourcedefinition.apiextensions.k8s.io "perconaservermongodbrestores.psmdb.percona.com" deleted
customresourcedefinition.apiextensions.k8s.io "perconaservermongodbs.psmdb.percona.com" deleted
+ cat /tmp/tmp.7dC2S70Ckc
+ rm /tmp/tmp.cOyqB7U1gp /tmp/tmp.7dC2S70Ckc
+ return 0
++ yq eval .metadata.name /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1569/deploy/crd.yaml
++ grep -v '\-\-\-'
+ for crd_name in '$(yq eval '\''.metadata.name'\'' "${src_dir}/deploy/crd.yaml" | grep -v '\''\-\-\-'\'')'
+ grep -v NAMESPACE
+ kubectl get perconaservermongodbbackups.psmdb.percona.com --all-namespaces -o wide
+ xargs -L 1 sh -xc 'kubectl patch perconaservermongodbbackups.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"'
+ kubectl patch perconaservermongodbbackups.psmdb.percona.com -n demand-backup-physical-sharded-4995 backup-azure-blob-sharded --type=merge -p '{"metadata":{"finalizers":[]}}'
perconaservermongodbbackup.psmdb.percona.com/backup-azure-blob-sharded patched
+ kubectl patch perconaservermongodbbackups.psmdb.percona.com -n demand-backup-physical-sharded-4995 backup-minio-sharded --type=merge -p '{"metadata":{"finalizers":[]}}'
perconaservermongodbbackup.psmdb.percona.com/backup-minio-sharded patched
+ kubectl_bin wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com
++ mktemp
+ local LAST_OUT=/tmp/tmp.aHuGWkSQCs
++ mktemp
+ local LAST_ERR=/tmp/tmp.0zaQi01UUE
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.aHuGWkSQCs
customresourcedefinition.apiextensions.k8s.io/perconaservermongodbbackups.psmdb.percona.com condition met
+ cat /tmp/tmp.0zaQi01UUE
+ rm /tmp/tmp.aHuGWkSQCs /tmp/tmp.0zaQi01UUE
+ return 0
+ for crd_name in '$(yq eval '\''.metadata.name'\'' "${src_dir}/deploy/crd.yaml" | grep -v '\''\-\-\-'\'')'
+ kubectl get perconaservermongodbrestores.psmdb.percona.com --all-namespaces -o wide
+ xargs -L 1 sh -xc 'kubectl patch perconaservermongodbrestores.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"'
+ grep -v NAMESPACE
error: the server doesn't have a resource type "perconaservermongodbrestores"
+ kubectl patch perconaservermongodbrestores.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}'
error: the server doesn't have a resource type "perconaservermongodbrestores"
+ :
+ kubectl_bin wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com
++ mktemp
+ local LAST_OUT=/tmp/tmp.gYdE28xhSp
++ mktemp
+ local LAST_ERR=/tmp/tmp.bl8w1m29Mh
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.gYdE28xhSp
+ cat /tmp/tmp.bl8w1m29Mh
+ rm /tmp/tmp.gYdE28xhSp /tmp/tmp.bl8w1m29Mh
+ return 0
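Each CRD iteration in this delete_crd pass follows the same shape: list any leftover custom resources of that type, clear their metadata.finalizers via a merge patch so CRD deletion cannot hang on them, then wait for the CRD itself to disappear, tolerating the "doesn't have a resource type" error once the CRD is already gone. A sketch reconstructed from the trace, with src_dir as in the log; the "|| :" mirrors the "+ :" no-ops recorded above:

    for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-'); do
        # clear finalizers on every remaining CR of this type, in every namespace;
        # $0/$1 in the sh -xc body receive the NAMESPACE and NAME columns
        kubectl get "$crd_name" --all-namespaces -o wide \
            | grep -v NAMESPACE \
            | xargs -L 1 sh -xc "kubectl patch $crd_name -n \$0 \$1 --type=merge -p '{\"metadata\":{\"finalizers\":[]}}'" \
            || :
        kubectl wait --for=delete crd "$crd_name" || :
    done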
+ for crd_name in '$(yq eval '\''.metadata.name'\'' "${src_dir}/deploy/crd.yaml" | grep -v '\''\-\-\-'\'')'
+ kubectl get perconaservermongodbs.psmdb.percona.com --all-namespaces -o wide
+ grep -v NAMESPACE
+ xargs -L 1 sh -xc 'kubectl patch perconaservermongodbs.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"'
error: the server doesn't have a resource type "perconaservermongodbs"
+ kubectl patch perconaservermongodbs.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}'
error: the server doesn't have a resource type "perconaservermongodbs"
+ :
+ kubectl_bin wait --for=delete crd perconaservermongodbs.psmdb.percona.com
++ mktemp
+ local LAST_OUT=/tmp/tmp.L5lbUGRpKo
++ mktemp
+ local LAST_ERR=/tmp/tmp.gK0UHqhNlJ
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl wait --for=delete crd perconaservermongodbs.psmdb.percona.com
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.L5lbUGRpKo
+ cat /tmp/tmp.gK0UHqhNlJ
+ rm /tmp/tmp.L5lbUGRpKo /tmp/tmp.gK0UHqhNlJ
+ return 0
+ local rbac_yaml=rbac.yaml
+ '[' -n psmdb-operator ']'
+ rbac_yaml=cw-rbac.yaml
+ kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1569/deploy/cw-rbac.yaml --ignore-not-found
++ mktemp
+ local LAST_OUT=/tmp/tmp.MLvyayHkXJ
++ mktemp
+ local LAST_ERR=/tmp/tmp.vgv3bvvKwg
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1569/deploy/cw-rbac.yaml --ignore-not-found
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.MLvyayHkXJ
clusterrole.rbac.authorization.k8s.io "percona-server-mongodb-operator" deleted
clusterrolebinding.rbac.authorization.k8s.io "service-account-percona-server-mongodb-operator" deleted
+ cat /tmp/tmp.vgv3bvvKwg
+ rm /tmp/tmp.MLvyayHkXJ /tmp/tmp.vgv3bvvKwg
+ return 0
+ kubectl_bin delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml
+ :
+ '[' -n '' ']'
+ '[' -n psmdb-operator ']'
+ kubectl_bin delete --grace-period=0 --force=true namespace demand-backup-physical-sharded-4995
+ rm -rf /tmp/tmp.mfU7C0j586
+ kubectl_bin delete --grace-period=0 --force=true namespace psmdb-operator
++ mktemp
++ mktemp
+ local LAST_OUT=/tmp/tmp.nOepEK77RH
+ local LAST_OUT=/tmp/tmp.yxQTPdL08G
++ mktemp
++ mktemp
+ local LAST_ERR=/tmp/tmp.ADklAMmWhr
+ local exit_status=0
+ local timeout=4
+ local LAST_ERR=/tmp/tmp.XwDPI8uuyv
+ local exit_status=0
+ local timeout=4
++ seq 0 2
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl delete --grace-period=0 --force=true namespace demand-backup-physical-sharded-4995
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl delete --grace-period=0 --force=true namespace psmdb-operator
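Nearly every command in this log runs through the kubectl_bin wrapper whose expansion repeats throughout (and whose two backgrounded namespace deletions interleave just above): capture stdout and stderr into mktemp files, retry up to three times (seq 0 2), echo the captured streams, remove the temp files, and return the exit status. A reconstruction from the trace, not the suite's actual source; the backoff "sleep $((timeout * i))" is an assumption inferred from the "sleep 0" seen after first failures earlier in the log:

    kubectl_bin() {
        local LAST_OUT LAST_ERR exit_status=0 timeout=4 i
        LAST_OUT=$(mktemp)
        LAST_ERR=$(mktemp)
        for i in $(seq 0 2); do
            set +e
            kubectl "$@" 1>"$LAST_OUT" 2>"$LAST_ERR"
            exit_status=$?
            set -e
            if [ "$exit_status" != 0 -a -n "$timeout" ]; then
                # on failure: surface the captured output, back off, retry
                cat "$LAST_OUT"
                cat "$LAST_ERR" >&2
                sleep $((timeout * i))   # assumption: matches the "sleep 0" on the first retry
            else
                break
            fi
        done
        cat "$LAST_OUT"
        cat "$LAST_ERR" >&2
        rm "$LAST_OUT" "$LAST_ERR"
        return "$exit_status"
    }

Callers such as "kubectl_bin wait --for=delete crd ..." above rely on this wrapper to ride out transient API-server errors during teardown.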