Log: /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1578/e2e-tests/logs/monitoring-2-0.log WARNING: version difference between client (1.30) and server (1.26) exceeds the supported minor version skew of +/-1 WARNING: version difference between client (1.30) and server (1.26) exceeds the supported minor version skew of +/-1 WARNING: version difference between client (1.30) and server (1.26) exceeds the supported minor version skew of +/-1 + create_infra monitoring-2-0-21005 + local ns=monitoring-2-0-21005 + delete_crd + desc 'get and delete old CRDs and RBAC' + set +o xtrace ----------------------------------------------------------------------------------- get and delete old CRDs and RBAC ----------------------------------------------------------------------------------- + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1578/deploy/crd.yaml --ignore-not-found --wait=false ++ mktemp + local LAST_OUT=/tmp/tmp.qznRTKbmeU ++ mktemp + local LAST_ERR=/tmp/tmp.KLh12Yg2K7 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1578/deploy/crd.yaml --ignore-not-found --wait=false + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.qznRTKbmeU customresourcedefinition.apiextensions.k8s.io "perconaservermongodbbackups.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbrestores.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbs.psmdb.percona.com" deleted + cat /tmp/tmp.KLh12Yg2K7 + rm /tmp/tmp.qznRTKbmeU /tmp/tmp.KLh12Yg2K7 + return 0 ++ yq eval .metadata.name /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1578/deploy/crd.yaml ++ grep -v '\-\-\-' + for crd_name in '$(yq eval '\''.metadata.name'\'' "${src_dir}/deploy/crd.yaml" | grep -v '\''\-\-\-'\'')' + grep -v NAMESPACE + kubectl get perconaservermongodbbackups.psmdb.percona.com --all-namespaces -o wide + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbbackups.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbbackups" + kubectl patch perconaservermongodbbackups.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbbackups" + : + kubectl_bin wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.upbR2YQ0qg ++ mktemp + local LAST_ERR=/tmp/tmp.Y7WYeSW1gV + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.upbR2YQ0qg + cat /tmp/tmp.Y7WYeSW1gV + rm /tmp/tmp.upbR2YQ0qg /tmp/tmp.Y7WYeSW1gV + return 0 + for crd_name in '$(yq eval '\''.metadata.name'\'' "${src_dir}/deploy/crd.yaml" | grep -v '\''\-\-\-'\'')' + kubectl get perconaservermongodbrestores.psmdb.percona.com --all-namespaces -o wide + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbrestores.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' + grep -v NAMESPACE error: the server doesn't have a resource type "perconaservermongodbrestores" + kubectl patch perconaservermongodbrestores.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type 
"perconaservermongodbrestores" + : + kubectl_bin wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.XUIAhdWRO9 ++ mktemp + local LAST_ERR=/tmp/tmp.F2RINVwR3b + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.XUIAhdWRO9 + cat /tmp/tmp.F2RINVwR3b + rm /tmp/tmp.XUIAhdWRO9 /tmp/tmp.F2RINVwR3b + return 0 + for crd_name in '$(yq eval '\''.metadata.name'\'' "${src_dir}/deploy/crd.yaml" | grep -v '\''\-\-\-'\'')' + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbs.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' + kubectl get perconaservermongodbs.psmdb.percona.com --all-namespaces -o wide error: the server doesn't have a resource type "perconaservermongodbs" + kubectl patch perconaservermongodbs.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbs" + : + kubectl_bin wait --for=delete crd perconaservermongodbs.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.VyLaUFSg3V ++ mktemp + local LAST_ERR=/tmp/tmp.yasUB7EXn1 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=delete crd perconaservermongodbs.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.VyLaUFSg3V + cat /tmp/tmp.yasUB7EXn1 + rm /tmp/tmp.VyLaUFSg3V /tmp/tmp.yasUB7EXn1 + return 0 + local rbac_yaml=rbac.yaml + '[' -n psmdb-operator ']' + rbac_yaml=cw-rbac.yaml + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1578/deploy/cw-rbac.yaml --ignore-not-found ++ mktemp + local LAST_OUT=/tmp/tmp.I7KoYMWBQB ++ mktemp + local LAST_ERR=/tmp/tmp.xpgbP96wnJ + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1578/deploy/cw-rbac.yaml --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.I7KoYMWBQB clusterrole.rbac.authorization.k8s.io "percona-server-mongodb-operator" deleted clusterrolebinding.rbac.authorization.k8s.io "service-account-percona-server-mongodb-operator" deleted + cat /tmp/tmp.xpgbP96wnJ + rm /tmp/tmp.I7KoYMWBQB /tmp/tmp.xpgbP96wnJ + return 0 + check_crd_for_deletion PR-1578-5cce6998 + local git_tag=PR-1578-5cce6998 ++ /usr/bin/sed ':a;N;$!ba;s/\n/ /g' ++ yq eval .metadata.name ++ /usr/bin/sed s/---//g ++ curl -s https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/PR-1578-5cce6998/deploy/crd.yaml + for crd_name in '$(curl -s https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/${git_tag}/deploy/crd.yaml | yq eval '\''.metadata.name'\'' | $sed '\''s/---//g'\'' | $sed '\'':a;N;$!ba;s/\n/ /g'\'')' ++ kubectl_bin get crd/null -o 'jsonpath={.status.conditions[-1].type}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.KLoblqqfy8 +++ mktemp ++ local LAST_ERR=/tmp/tmp.KbOBXhafAh ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}' ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.KLoblqqfy8 ++ cat /tmp/tmp.KbOBXhafAh Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ sleep 0 ++ for i in '$(seq 0 2)' ++ set +e 
++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}' ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.KLoblqqfy8 ++ cat /tmp/tmp.KbOBXhafAh Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ sleep 4 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}' ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.KLoblqqfy8 ++ cat /tmp/tmp.KbOBXhafAh Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ sleep 8 ++ cat /tmp/tmp.KLoblqqfy8 ++ cat /tmp/tmp.KbOBXhafAh Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ rm /tmp/tmp.KLoblqqfy8 /tmp/tmp.KbOBXhafAh ++ return 1 + [[ '' == \T\e\r\m\i\n\a\t\i\n\g ]] + '[' -n psmdb-operator ']' + create_namespace psmdb-operator + local namespace=psmdb-operator + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + destroy_chaos_mesh ++ helm list --all-namespaces --filter chaos-mesh ++ sed s/NAMESPACE// ++ awk '-F ' '{print $2}' ++ tail -n1 + local chaos_mesh_ns= + desc 'destroy chaos-mesh' + set +o xtrace ----------------------------------------------------------------------------------- destroy chaos-mesh ----------------------------------------------------------------------------------- + '[' -n '' ']' ++ kubectl get MutatingWebhookConfiguration ++ awk '{print $1}' ++ grep chaos-mesh + timeout 30 kubectl delete MutatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ awk '{print $1}' ++ grep chaos-mesh ++ kubectl get ValidatingWebhookConfiguration + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep validate-auth ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl api-resources ++ awk '{print $1}' ++ grep chaos-mesh ++ awk '{print $1}' ++ kubectl get crd ++ grep chaos-mesh.org + timeout 30 kubectl delete crd error: resource(s) were provided, but no name was specified + : ++ grep chaos-mesh ++ awk '{print $1}' ++ kubectl get clusterrolebinding + timeout 30 kubectl delete clusterrolebinding error: resource(s) were provided, but no name was specified + : ++ awk '{print $1}' ++ grep chaos-mesh ++ kubectl get clusterrole + timeout 30 kubectl delete clusterrole error: resource(s) were provided, but no name was specified + : + desc 'cleaned up all old namespaces' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up all old namespaces ----------------------------------------------------------------------------------- + kubectl_bin get ns + awk '{print$1}' + '[' -n '' ']' + desc 'cleaned up old namespaces psmdb-operator' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up old namespaces psmdb-operator ----------------------------------------------------------------------------------- + kubectl_bin delete namespace psmdb-operator --ignore-not-found + egrep -v '^kube-|^default|Terminating|psmdb-operator|openshift|gke-mcs|^NAME' ++ mktemp ++ mktemp + local LAST_OUT=/tmp/tmp.vZhzdkX5TZ ++ mktemp + local LAST_ERR=/tmp/tmp.CzzBDkPW4s + local exit_status=0 + local timeout=4 + local LAST_OUT=/tmp/tmp.jTxnYgYmEH ++ seq 0 2 ++ mktemp + for i in '$(seq 0 2)' + set 
+e + local LAST_ERR=/tmp/tmp.Ju6l0MeHSE + kubectl delete namespace psmdb-operator --ignore-not-found + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get ns + xargs kubectl delete ns + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.jTxnYgYmEH + cat /tmp/tmp.Ju6l0MeHSE + rm /tmp/tmp.jTxnYgYmEH /tmp/tmp.Ju6l0MeHSE + return 0 namespace "cert-manager" deleted namespace "monitoring-2-0-15518" deleted + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.vZhzdkX5TZ namespace "psmdb-operator" deleted + cat /tmp/tmp.CzzBDkPW4s + rm /tmp/tmp.vZhzdkX5TZ /tmp/tmp.CzzBDkPW4s + return 0 + kubectl_bin wait --for=delete namespace psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.dIUqWUsqUW ++ mktemp + local LAST_ERR=/tmp/tmp.zcSrpyFOyo + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=delete namespace psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.dIUqWUsqUW + cat /tmp/tmp.zcSrpyFOyo + rm /tmp/tmp.dIUqWUsqUW /tmp/tmp.zcSrpyFOyo + return 0 + desc 'create namespace psmdb-operator' + set +o xtrace ----------------------------------------------------------------------------------- create namespace psmdb-operator ----------------------------------------------------------------------------------- + kubectl_bin create namespace psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.8YQpfDK5Nj ++ mktemp + local LAST_ERR=/tmp/tmp.FXSSyJ1vdO + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl create namespace psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.8YQpfDK5Nj namespace/psmdb-operator created + cat /tmp/tmp.FXSSyJ1vdO + rm /tmp/tmp.8YQpfDK5Nj /tmp/tmp.FXSSyJ1vdO + return 0 ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.IQrxXPSeVU +++ mktemp ++ local LAST_ERR=/tmp/tmp.etoejG3Nqn ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.IQrxXPSeVU ++ cat /tmp/tmp.etoejG3Nqn ++ rm /tmp/tmp.IQrxXPSeVU /tmp/tmp.etoejG3Nqn ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-1578-5cce6998-4-cluster5 --namespace=psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.fvNdLNpuB9 ++ mktemp + local LAST_ERR=/tmp/tmp.8MZX0EALBP + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-1578-5cce6998-4-cluster5 --namespace=psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.fvNdLNpuB9 Context "gke_cloud-dev-112233_us-central1-a_jen-psmdb-1578-5cce6998-4-cluster5" modified. 
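Nearly every "local LAST_OUT=/tmp/tmp.* ... local LAST_ERR=/tmp/tmp.*" run in this log is the expansion of the kubectl_bin helper, which retries kubectl up to three times while capturing stdout and stderr to temp files. A minimal sketch reconstructed from the xtrace (the real definition lives in the repo's e2e-tests helpers; the retry condition, shown in the trace as '[ $exit_status != 0 -a -n 1 ]', is simplified here):

kubectl_bin() {
    local LAST_OUT LAST_ERR exit_status=0 timeout=4 i
    LAST_OUT=$(mktemp)
    LAST_ERR=$(mktemp)
    for i in $(seq 0 2); do
        set +e
        kubectl "$@" >"$LAST_OUT" 2>"$LAST_ERR"
        exit_status=$?
        set -e
        if [ "$exit_status" != 0 ]; then
            cat "$LAST_OUT"; cat "$LAST_ERR" >&2   # dump each failed attempt
            sleep $((i * timeout))                 # observed backoff: 0s, 4s, 8s
        else
            break
        fi
    done
    cat "$LAST_OUT"
    cat "$LAST_ERR" >&2
    rm "$LAST_OUT" "$LAST_ERR"
    return "$exit_status"
}

Two such wrappers ran concurrently in the cleanup above (the namespace-sweep pipeline and the explicit "kubectl delete namespace psmdb-operator"), which is why their temp-file bookkeeping lines arrive interleaved in the trace.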
+ cat /tmp/tmp.8MZX0EALBP + rm /tmp/tmp.fvNdLNpuB9 /tmp/tmp.8MZX0EALBP + return 0 + deploy_operator + desc 'start PSMDB operator' + set +o xtrace ----------------------------------------------------------------------------------- start PSMDB operator ----------------------------------------------------------------------------------- + local cr_file + '[' -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1578/e2e-tests/monitoring-2-0/conf/crd.yaml ']' + cr_file=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1578/deploy/crd.yaml + kubectl_bin apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1578/deploy/crd.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.IVqYlSJyD6 ++ mktemp + local LAST_ERR=/tmp/tmp.p9ZrCEOIqm + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1578/deploy/crd.yaml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.IVqYlSJyD6 customresourcedefinition.apiextensions.k8s.io/perconaservermongodbbackups.psmdb.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaservermongodbrestores.psmdb.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaservermongodbs.psmdb.percona.com serverside-applied + cat /tmp/tmp.p9ZrCEOIqm + rm /tmp/tmp.IVqYlSJyD6 /tmp/tmp.p9ZrCEOIqm + return 0 + '[' -n psmdb-operator ']' + apply_rbac cw-rbac + local operator_namespace=psmdb-operator + local rbac=cw-rbac + sed -e 's^namespace: .*^namespace: psmdb-operator^' + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1578/deploy/cw-rbac.yaml + kubectl_bin apply -n psmdb-operator -f - ++ mktemp + local LAST_OUT=/tmp/tmp.jUj0UGMgTd ++ mktemp + local LAST_ERR=/tmp/tmp.rbmWbaBXOH + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -n psmdb-operator -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.jUj0UGMgTd clusterrole.rbac.authorization.k8s.io/percona-server-mongodb-operator created serviceaccount/percona-server-mongodb-operator created clusterrolebinding.rbac.authorization.k8s.io/service-account-percona-server-mongodb-operator created + cat /tmp/tmp.rbmWbaBXOH + rm /tmp/tmp.jUj0UGMgTd /tmp/tmp.rbmWbaBXOH + return 0 + kubectl_bin apply -f - ++ mktemp + yq eval ' (.spec.template.spec.containers[].image = "perconalab/percona-server-mongodb-operator:PR-1578-5cce6998") | ((.. | select(.[] == "DISABLE_TELEMETRY")) |= .value="true") | ((.. 
| select(.[] == "LOG_LEVEL")) |= .value="DEBUG")' /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1578/deploy/cw-operator.yaml + local LAST_OUT=/tmp/tmp.5yB1qiuTtD ++ mktemp + local LAST_ERR=/tmp/tmp.dyKpb42wWW + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.5yB1qiuTtD deployment.apps/percona-server-mongodb-operator created + cat /tmp/tmp.dyKpb42wWW + rm /tmp/tmp.5yB1qiuTtD /tmp/tmp.dyKpb42wWW + return 0 + sleep 2 ++ get_operator_pod ++ kubectl_bin get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.U4yDqXF9nx +++ mktemp ++ local LAST_ERR=/tmp/tmp.JenCZdlivw ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.U4yDqXF9nx ++ cat /tmp/tmp.JenCZdlivw ++ rm /tmp/tmp.U4yDqXF9nx /tmp/tmp.JenCZdlivw ++ return 0 + wait_pod percona-server-mongodb-operator-7dd65664b8-c6ddc + local pod=percona-server-mongodb-operator-7dd65664b8-c6ddc + set +o xtrace waiting for pod/percona-server-mongodb-operator-7dd65664b8-c6ddc to be ready.OK + create_namespace monitoring-2-0-21005 + local namespace=monitoring-2-0-21005 + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + destroy_chaos_mesh ++ helm list --all-namespaces --filter chaos-mesh ++ awk '-F ' '{print $2}' ++ sed s/NAMESPACE// ++ tail -n1 + local chaos_mesh_ns= + desc 'destroy chaos-mesh' + set +o xtrace ----------------------------------------------------------------------------------- destroy chaos-mesh ----------------------------------------------------------------------------------- + '[' -n '' ']' ++ kubectl get MutatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete MutatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ grep validate-auth ++ awk '{print $1}' ++ kubectl get ValidatingWebhookConfiguration + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl api-resources ++ grep chaos-mesh ++ awk '{print $1}' ++ kubectl get crd ++ grep chaos-mesh.org ++ awk '{print $1}' + timeout 30 kubectl delete crd error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrolebinding ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrolebinding error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrole ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrole error: resource(s) were provided, but no name was specified + : + desc 'cleaned up all old namespaces' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up all old namespaces ----------------------------------------------------------------------------------- + kubectl_bin get ns + '[' -n '' ']' + desc 'cleaned up old namespaces monitoring-2-0-21005' + set +o xtrace 
----------------------------------------------------------------------------------- cleaned up old namespaces monitoring-2-0-21005++ mktemp ----------------------------------------------------------------------------------- + kubectl_bin delete namespace monitoring-2-0-21005 --ignore-not-found ++ mktemp + local LAST_OUT=/tmp/tmp.oddsTkTSBA + local LAST_OUT=/tmp/tmp.MPM4kk9e2Q ++ mktemp + xargs kubectl delete ns + local LAST_ERR=/tmp/tmp.QCAZWQTkw0 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get ns ++ mktemp + local LAST_ERR=/tmp/tmp.xkzQ4QhRUQ + local exit_status=0 + local timeout=4 + awk '{print$1}' + egrep -v '^kube-|^default|Terminating|psmdb-operator|openshift|gke-mcs|^NAME' ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete namespace monitoring-2-0-21005 --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.oddsTkTSBA + cat /tmp/tmp.QCAZWQTkw0 + rm /tmp/tmp.oddsTkTSBA /tmp/tmp.QCAZWQTkw0 + return 0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.MPM4kk9e2Q + cat /tmp/tmp.xkzQ4QhRUQ + rm /tmp/tmp.MPM4kk9e2Q /tmp/tmp.xkzQ4QhRUQ + return 0 + kubectl_bin wait --for=delete namespace monitoring-2-0-21005 ++ mktemp + local LAST_OUT=/tmp/tmp.8yiPuIxuNw ++ mktemp + local LAST_ERR=/tmp/tmp.IkUaQzS7dX + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=delete namespace monitoring-2-0-21005 error: resource(s) were provided, but no name was specified + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.8yiPuIxuNw + cat /tmp/tmp.IkUaQzS7dX + rm /tmp/tmp.8yiPuIxuNw /tmp/tmp.IkUaQzS7dX + return 0 + desc 'create namespace monitoring-2-0-21005' + set +o xtrace ----------------------------------------------------------------------------------- create namespace monitoring-2-0-21005 ----------------------------------------------------------------------------------- + kubectl_bin create namespace monitoring-2-0-21005 ++ mktemp + local LAST_OUT=/tmp/tmp.kSga0Gz57N ++ mktemp + local LAST_ERR=/tmp/tmp.HI2BBgDIzO + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl create namespace monitoring-2-0-21005 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.kSga0Gz57N namespace/monitoring-2-0-21005 created + cat /tmp/tmp.HI2BBgDIzO + rm /tmp/tmp.kSga0Gz57N /tmp/tmp.HI2BBgDIzO + return 0 ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.1F6hz3km60 +++ mktemp ++ local LAST_ERR=/tmp/tmp.Gz8iL0Gf4x ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.1F6hz3km60 ++ cat /tmp/tmp.Gz8iL0Gf4x ++ rm /tmp/tmp.1F6hz3km60 /tmp/tmp.Gz8iL0Gf4x ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-1578-5cce6998-4-cluster5 --namespace=monitoring-2-0-21005 ++ mktemp + local LAST_OUT=/tmp/tmp.guQGzl3NvK ++ mktemp + local LAST_ERR=/tmp/tmp.sNHNao7B2v + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-1578-5cce6998-4-cluster5 --namespace=monitoring-2-0-21005 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.guQGzl3NvK Context "gke_cloud-dev-112233_us-central1-a_jen-psmdb-1578-5cce6998-4-cluster5" modified. 
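The namespace setup seen twice now (for psmdb-operator and for monitoring-2-0-21005) follows one pattern: best-effort chaos-mesh teardown, a sweep of leftover test namespaces, then an idempotent delete-wait-create followed by pinning the kubeconfig context. A condensed sketch, with the egrep pattern copied verbatim from the trace:

create_namespace() {
    local namespace=$1
    destroy_chaos_mesh    # traced above; errors are swallowed with ':'
    # sweep all non-system namespaces left over from earlier runs
    kubectl get ns \
        | egrep -v '^kube-|^default|Terminating|psmdb-operator|openshift|gke-mcs|^NAME' \
        | awk '{print $1}' \
        | xargs kubectl delete ns
    kubectl delete namespace "$namespace" --ignore-not-found
    kubectl wait --for=delete namespace "$namespace" || :
    kubectl create namespace "$namespace"
    kubectl config set-context "$(kubectl config current-context)" --namespace="$namespace"
}

Each "error: resource(s) were provided, but no name was specified" in the chaos-mesh teardown just means the grep matched nothing, so kubectl delete received a resource type but no names; the trailing "+ :" no-op keeps set -e from aborting the run.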
+ cat /tmp/tmp.sNHNao7B2v + rm /tmp/tmp.guQGzl3NvK /tmp/tmp.sNHNao7B2v + return 0 + deploy_cert_manager + desc 'deploy cert manager' + set +o xtrace ----------------------------------------------------------------------------------- deploy cert manager ----------------------------------------------------------------------------------- + kubectl_bin create namespace cert-manager ++ mktemp + local LAST_OUT=/tmp/tmp.CE2Uf6DsQ2 ++ mktemp + local LAST_ERR=/tmp/tmp.GuRdLhdyDB + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl create namespace cert-manager + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.CE2Uf6DsQ2 namespace/cert-manager created + cat /tmp/tmp.GuRdLhdyDB + rm /tmp/tmp.CE2Uf6DsQ2 /tmp/tmp.GuRdLhdyDB + return 0 + kubectl_bin label namespace cert-manager certmanager.k8s.io/disable-validation=true ++ mktemp + local LAST_OUT=/tmp/tmp.wa674zHM1k ++ mktemp + local LAST_ERR=/tmp/tmp.gsuipBBLVH + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl label namespace cert-manager certmanager.k8s.io/disable-validation=true + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.wa674zHM1k namespace/cert-manager labeled + cat /tmp/tmp.gsuipBBLVH + rm /tmp/tmp.wa674zHM1k /tmp/tmp.gsuipBBLVH + return 0 + kubectl_bin apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml --validate=false ++ mktemp + local LAST_OUT=/tmp/tmp.F0eSeQrLX4 ++ mktemp + local LAST_ERR=/tmp/tmp.2kKbOEimU1 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml --validate=false + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.F0eSeQrLX4 namespace/cert-manager configured customresourcedefinition.apiextensions.k8s.io/certificaterequests.cert-manager.io unchanged customresourcedefinition.apiextensions.k8s.io/certificates.cert-manager.io unchanged customresourcedefinition.apiextensions.k8s.io/challenges.acme.cert-manager.io unchanged customresourcedefinition.apiextensions.k8s.io/clusterissuers.cert-manager.io unchanged customresourcedefinition.apiextensions.k8s.io/issuers.cert-manager.io unchanged customresourcedefinition.apiextensions.k8s.io/orders.acme.cert-manager.io unchanged serviceaccount/cert-manager-cainjector created serviceaccount/cert-manager created serviceaccount/cert-manager-webhook created clusterrole.rbac.authorization.k8s.io/cert-manager-cainjector unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-issuers unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-clusterissuers unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-certificates unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-orders unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-challenges unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-ingress-shim unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-cluster-view unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-view unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-edit unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-approve:cert-manager-io unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-certificatesigningrequests unchanged 
clusterrole.rbac.authorization.k8s.io/cert-manager-webhook:subjectaccessreviews unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-cainjector unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-issuers unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-clusterissuers unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-certificates unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-orders unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-challenges unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-ingress-shim unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-approve:cert-manager-io unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-certificatesigningrequests unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-webhook:subjectaccessreviews configured role.rbac.authorization.k8s.io/cert-manager-cainjector:leaderelection unchanged role.rbac.authorization.k8s.io/cert-manager:leaderelection unchanged role.rbac.authorization.k8s.io/cert-manager-webhook:dynamic-serving created rolebinding.rbac.authorization.k8s.io/cert-manager-cainjector:leaderelection unchanged rolebinding.rbac.authorization.k8s.io/cert-manager:leaderelection configured rolebinding.rbac.authorization.k8s.io/cert-manager-webhook:dynamic-serving created service/cert-manager created service/cert-manager-webhook created deployment.apps/cert-manager-cainjector created deployment.apps/cert-manager created deployment.apps/cert-manager-webhook created mutatingwebhookconfiguration.admissionregistration.k8s.io/cert-manager-webhook configured validatingwebhookconfiguration.admissionregistration.k8s.io/cert-manager-webhook configured + cat /tmp/tmp.2kKbOEimU1 Warning: resource namespaces/cert-manager is missing the kubectl.kubernetes.io/last-applied-configuration annotation which is required by kubectl apply. kubectl apply should only be used on resources created declaratively by either kubectl create --save-config or kubectl apply. The missing annotation will be patched automatically. 
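Stripped of the retry scaffolding, deploy_cert_manager is three commands (version and URL copied from the trace):

kubectl create namespace cert-manager
kubectl label namespace cert-manager certmanager.k8s.io/disable-validation=true
kubectl apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml --validate=false

The stderr warning above is benign: the cert-manager namespace was first created imperatively with kubectl create, so it lacks the kubectl.kubernetes.io/last-applied-configuration annotation that kubectl apply expects; apply patches the annotation in automatically and reports the namespace as "configured".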
+ rm /tmp/tmp.F0eSeQrLX4 /tmp/tmp.2kKbOEimU1 + return 0 + kubectl_bin -n cert-manager wait pod -l app.kubernetes.io/instance=cert-manager --for=condition=ready ++ mktemp + local LAST_OUT=/tmp/tmp.rqoaa974dA ++ mktemp + local LAST_ERR=/tmp/tmp.ACExxfKUWe + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl -n cert-manager wait pod -l app.kubernetes.io/instance=cert-manager --for=condition=ready + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.rqoaa974dA pod/cert-manager-5658d944df-f87q9 condition met pod/cert-manager-cainjector-cb99ff845-7s46g condition met pod/cert-manager-webhook-7fd74b8dc7-92tn2 condition met + cat /tmp/tmp.ACExxfKUWe + rm /tmp/tmp.rqoaa974dA /tmp/tmp.ACExxfKUWe + return 0 + sleep 120 + desc 'install PMM Server' + set +o xtrace ----------------------------------------------------------------------------------- install PMM Server ----------------------------------------------------------------------------------- + deploy_pmm_server + helm uninstall monitoring Error: uninstall: Release not loaded: monitoring: release: not found + : + helm repo remove stable "stable" has been removed from your repositories + helm repo add stable https://charts.helm.sh/stable "stable" has been added to your repositories + [[ -n '' ]] + retry 10 60 helm install monitoring --set imageTag=dev-latest --set imageRepo=perconalab/pmm-server https://percona-charts.storage.googleapis.com/pmm-server-9.9.9.tgz + local max=10 + local delay=60 + shift 2 + local n=1 + helm install monitoring --set imageTag=dev-latest --set imageRepo=perconalab/pmm-server https://percona-charts.storage.googleapis.com/pmm-server-9.9.9.tgz NAME: monitoring LAST DEPLOYED: Wed Jun 26 08:40:18 2024 NAMESPACE: monitoring-2-0-21005 STATUS: deployed REVISION: 1 TEST SUITE: None NOTES: PMM server can be accessed via HTTPS (port 443) on the following DNS name from within your cluster: endpoint: https://monitoring-service.monitoring-2-0-21005.svc.cluster.local:443 login: admin password: admin + sleep 20 + kubectl_bin exec monitoring-0 -- bash -c 'ls -l /proc/*/exe 2>/dev/null| grep postgres >/dev/null' ++ mktemp + local LAST_OUT=/tmp/tmp.PGPWuI2TwE ++ mktemp + local LAST_ERR=/tmp/tmp.iyQM9LKJ5W + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec monitoring-0 -- bash -c 'ls -l /proc/*/exe 2>/dev/null| grep postgres >/dev/null' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.PGPWuI2TwE + cat /tmp/tmp.iyQM9LKJ5W + rm /tmp/tmp.PGPWuI2TwE /tmp/tmp.iyQM9LKJ5W + return 0 + cluster=monitoring + desc 'create secrets and start client' + set +o xtrace ----------------------------------------------------------------------------------- create secrets and start client ----------------------------------------------------------------------------------- + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1578/e2e-tests/conf/secrets.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1578/e2e-tests/monitoring-2-0/conf/secrets.yml ++ mktemp + local LAST_OUT=/tmp/tmp.JghftPz6mL ++ mktemp + local LAST_ERR=/tmp/tmp.oQawTt9xR9 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1578/e2e-tests/conf/secrets.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1578/e2e-tests/monitoring-2-0/conf/secrets.yml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.JghftPz6mL 
secret/some-users created secret/some-users unchanged + cat /tmp/tmp.oQawTt9xR9 + rm /tmp/tmp.JghftPz6mL /tmp/tmp.oQawTt9xR9 + return 0 + kubectl_bin apply -f - + yq '.spec.template.spec.volumes[0].secret.secretName="monitoring-ssl"' /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1578/e2e-tests/conf/client_with_tls.yml ++ mktemp + local LAST_OUT=/tmp/tmp.SXSdeP6Q5b ++ mktemp + local LAST_ERR=/tmp/tmp.Eb1onJcDxQ + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.SXSdeP6Q5b deployment.apps/psmdb-client created + cat /tmp/tmp.Eb1onJcDxQ + rm /tmp/tmp.SXSdeP6Q5b /tmp/tmp.Eb1onJcDxQ + return 0 + sleep 90 + desc 'create first PSMDB cluster monitoring' + set +o xtrace ----------------------------------------------------------------------------------- create first PSMDB cluster monitoring ----------------------------------------------------------------------------------- + apply_cluster /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1578/e2e-tests/monitoring-2-0/conf/monitoring-rs0.yml + '[' -z '' ']' + cat_config /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1578/e2e-tests/monitoring-2-0/conf/monitoring-rs0.yml + yq eval '.spec.upgradeOptions.apply="Never"' + yq eval '(.spec | select(has("backup"))).backup.image = "perconalab/percona-server-mongodb-operator:main-backup"' + yq eval '(.spec | select(has("initImage"))).initImage = "perconalab/percona-server-mongodb-operator:PR-1578-5cce6998"' + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.Pm4H1lU8uT ++ mktemp + local LAST_ERR=/tmp/tmp.oBZnpWOy2z + local exit_status=0 + local timeout=4 ++ seq 0 2 + yq eval '(.spec | select(.image == null)).image = "perconalab/percona-server-mongodb-operator:main-mongod7.0"' + yq eval '(.spec | select(has("pmm"))).pmm.image = "perconalab/pmm-client:dev-latest"' + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1578/e2e-tests/monitoring-2-0/conf/monitoring-rs0.yml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Pm4H1lU8uT perconaservermongodb.psmdb.percona.com/monitoring created + cat /tmp/tmp.oBZnpWOy2z + rm /tmp/tmp.Pm4H1lU8uT /tmp/tmp.oBZnpWOy2z + return 0 + wait_for_running monitoring-rs0 3 + local name=monitoring-rs0 + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=monitoring ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + [[ 0 -eq 2 ]] + wait_pod monitoring-rs0-0 + local pod=monitoring-rs0-0 + set +o xtrace waiting for pod/monitoring-rs0-0 to be ready............OK + for i in '$(seq 0 $last_pod)' + [[ 1 -eq 2 ]] + wait_pod monitoring-rs0-1 + local pod=monitoring-rs0-1 + set +o xtrace waiting for pod/monitoring-rs0-1 to be ready...........OK + for i in '$(seq 0 $last_pod)' + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb monitoring -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.jsfoqO71du +++ mktemp ++ local LAST_ERR=/tmp/tmp.ThuD9eIBPl ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb monitoring -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.jsfoqO71du ++ cat /tmp/tmp.ThuD9eIBPl ++ rm /tmp/tmp.jsfoqO71du /tmp/tmp.ThuD9eIBPl ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod monitoring-rs0-2 + local 
pod=monitoring-rs0-2 + set +o xtrace waiting for pod/monitoring-rs0-2 to be ready..........OK ++ kubectl_bin get psmdb monitoring -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.TvM3GgJ8ae +++ mktemp ++ local LAST_ERR=/tmp/tmp.6RZPXffHKQ ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb monitoring -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.TvM3GgJ8ae ++ cat /tmp/tmp.6RZPXffHKQ ++ rm /tmp/tmp.TvM3GgJ8ae /tmp/tmp.6RZPXffHKQ ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness.................... + desc 'check if pmm-client container is not enabled' + set +o xtrace ----------------------------------------------------------------------------------- check if pmm-client container is not enabled ----------------------------------------------------------------------------------- + compare_kubectl statefulset/monitoring-rs0 -no-pmm + local resource=statefulset/monitoring-rs0 + local postfix=-no-pmm + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1578/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-rs0-no-pmm.yml + local new_result=/tmp/tmp.D0vv3Eolup/statefulset_monitoring-rs0.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1578/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-rs0-no-pmm-oc.yml ']' + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. 
| select(tag == "!!str")) |= sub("monitoring-2-0-21005", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. == "batch/v1beta1")) = "batch/v1" ' - + kubectl_bin get -o yaml statefulset/monitoring-rs0 ++ mktemp + local LAST_OUT=/tmp/tmp.J5b3A1Jxx6 ++ mktemp + local LAST_ERR=/tmp/tmp.m7AE79dXJN + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get -o yaml statefulset/monitoring-rs0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.J5b3A1Jxx6 + cat /tmp/tmp.m7AE79dXJN + rm /tmp/tmp.J5b3A1Jxx6 /tmp/tmp.m7AE79dXJN + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.D0vv3Eolup/statefulset_monitoring-rs0.yml + version_gt 1.22 ++ echo '1.26 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.D0vv3Eolup/statefulset_monitoring-rs0.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.D0vv3Eolup/statefulset_monitoring-rs0.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1578/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-rs0-no-pmm.yml == */cronjob* ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1578/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-rs0-no-pmm.yml /tmp/tmp.D0vv3Eolup/statefulset_monitoring-rs0.yml + sleep 10 + run_mongos 'db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})' userAdmin:userAdmin123456@monitoring-mongos.monitoring-2-0-21005 '' '' '--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' + local 'command=db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})' + local uri=userAdmin:userAdmin123456@monitoring-mongos.monitoring-2-0-21005 + local driver=mongodb + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.B1BjXQhPJi +++ mktemp ++ local LAST_ERR=/tmp/tmp.SdD9jWeqcW ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.B1BjXQhPJi ++ cat /tmp/tmp.SdD9jWeqcW ++ rm /tmp/tmp.B1BjXQhPJi /tmp/tmp.SdD9jWeqcW ++ return 0 + local client_container=psmdb-client-6cd48df8b6-vv5md + local 'mongo_flag=--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' + kubectl_bin exec psmdb-client-6cd48df8b6-vv5md -- bash -c 'printf '\''db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})\n'\'' | mongo mongodb://userAdmin:userAdmin123456@monitoring-mongos.monitoring-2-0-21005.svc.cluster.local/admin --tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' ++ mktemp + local LAST_OUT=/tmp/tmp.crCBAwLl2V ++ mktemp + local LAST_ERR=/tmp/tmp.f158UYT51G + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-6cd48df8b6-vv5md -- bash -c 'printf '\''db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})\n'\'' | mongo mongodb://userAdmin:userAdmin123456@monitoring-mongos.monitoring-2-0-21005.svc.cluster.local/admin --tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' + 
exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.crCBAwLl2V Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://monitoring-mongos.monitoring-2-0-21005.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb {"t":{"$date":"2024-06-26T08:44:53.227Z"},"s":"I", "c":"NETWORK", "id":5490002, "ctx":"thread1","msg":"Started a new thread for the timer service"} Implicit session: session { "id" : UUID("543fa8ae-3b61-401b-92f4-a0653887306f") } Percona Server for MongoDB server version: v7.0.11-6 WARNING: shell and server versions do not match Successfully added user: { "user" : "myApp", "roles" : [ { "db" : "myApp", "role" : "readWrite" } ] } bye + cat /tmp/tmp.f158UYT51G + rm /tmp/tmp.crCBAwLl2V /tmp/tmp.f158UYT51G + return 0 + run_mongos 'sh.enableSharding("myApp")' clusterAdmin:clusterAdmin123456@monitoring-mongos.monitoring-2-0-21005 '' '' '--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' + local 'command=sh.enableSharding("myApp")' + local uri=clusterAdmin:clusterAdmin123456@monitoring-mongos.monitoring-2-0-21005 + local driver=mongodb + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.JbWEzfP5TZ +++ mktemp ++ local LAST_ERR=/tmp/tmp.CNIUsTOE2U ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.JbWEzfP5TZ ++ cat /tmp/tmp.CNIUsTOE2U ++ rm /tmp/tmp.JbWEzfP5TZ /tmp/tmp.CNIUsTOE2U ++ return 0 + local client_container=psmdb-client-6cd48df8b6-vv5md + local 'mongo_flag=--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' + kubectl_bin exec psmdb-client-6cd48df8b6-vv5md -- bash -c 'printf '\''sh.enableSharding("myApp")\n'\'' | mongo mongodb://clusterAdmin:clusterAdmin123456@monitoring-mongos.monitoring-2-0-21005.svc.cluster.local/admin --tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' ++ mktemp + local LAST_OUT=/tmp/tmp.Zr65xkjOXQ ++ mktemp + local LAST_ERR=/tmp/tmp.X82vVVh0YG + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-6cd48df8b6-vv5md -- bash -c 'printf '\''sh.enableSharding("myApp")\n'\'' | mongo mongodb://clusterAdmin:clusterAdmin123456@monitoring-mongos.monitoring-2-0-21005.svc.cluster.local/admin --tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Zr65xkjOXQ Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://monitoring-mongos.monitoring-2-0-21005.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb {"t":{"$date":"2024-06-26T08:44:56.675Z"},"s":"I", "c":"NETWORK", "id":5490002, "ctx":"thread1","msg":"Started a new thread for the timer service"} Implicit session: session { "id" : UUID("3ebe4dca-a6d8-4f7d-959e-3b84d5571cde") } Percona Server for MongoDB server version: v7.0.11-6 WARNING: shell and server versions do not match { "ok" : 1, "$clusterTime" : { "clusterTime" : Timestamp(1719391496, 9), "signature" : { "hash" : BinData(0,"6cJ/g/+VH7nvsB2UJ4PEYN79Aw8="), "keyId" : NumberLong("7384729879268294679") } }, "operationTime" : Timestamp(1719391496, 3) } bye + cat /tmp/tmp.X82vVVh0YG + rm /tmp/tmp.Zr65xkjOXQ 
/tmp/tmp.X82vVVh0YG + return 0 + insert_data_mongos 100500 myApp '--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' + local data=100500 + local db_name=myApp + local 'flags=--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' + run_mongos 'use myApp\n db.test.insert({ x: 100500 })' myApp:myPass@monitoring-mongos.monitoring-2-0-21005 '' '' '--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' + local 'command=use myApp\n db.test.insert({ x: 100500 })' + local uri=myApp:myPass@monitoring-mongos.monitoring-2-0-21005 + local driver=mongodb + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.QN4x3282Hi +++ mktemp ++ local LAST_ERR=/tmp/tmp.GIzp6HLfEg ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.QN4x3282Hi ++ cat /tmp/tmp.GIzp6HLfEg ++ rm /tmp/tmp.QN4x3282Hi /tmp/tmp.GIzp6HLfEg ++ return 0 + local client_container=psmdb-client-6cd48df8b6-vv5md + local 'mongo_flag=--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' + kubectl_bin exec psmdb-client-6cd48df8b6-vv5md -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb://myApp:myPass@monitoring-mongos.monitoring-2-0-21005.svc.cluster.local/admin --tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' ++ mktemp + local LAST_OUT=/tmp/tmp.m7Yh3hkMrq ++ mktemp + local LAST_ERR=/tmp/tmp.VR5dBABUNo + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-6cd48df8b6-vv5md -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb://myApp:myPass@monitoring-mongos.monitoring-2-0-21005.svc.cluster.local/admin --tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.m7Yh3hkMrq Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://monitoring-mongos.monitoring-2-0-21005.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb {"t":{"$date":"2024-06-26T08:44:59.735Z"},"s":"I", "c":"NETWORK", "id":5490002, "ctx":"thread1","msg":"Started a new thread for the timer service"} Implicit session: session { "id" : UUID("2f23f347-7594-44cb-b82d-8a94246d5681") } Percona Server for MongoDB server version: v7.0.11-6 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.VR5dBABUNo + rm /tmp/tmp.m7Yh3hkMrq /tmp/tmp.VR5dBABUNo + return 0 + insert_data_mongos 100600 myApp '--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' + local data=100600 + local db_name=myApp + local 'flags=--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' + run_mongos 'use myApp\n db.test.insert({ x: 100600 })' myApp:myPass@monitoring-mongos.monitoring-2-0-21005 '' '' '--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' + local 'command=use myApp\n db.test.insert({ x: 100600 })' + local uri=myApp:myPass@monitoring-mongos.monitoring-2-0-21005 + local driver=mongodb + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 
'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ufzgXR6MLz +++ mktemp ++ local LAST_ERR=/tmp/tmp.Hr7gFQj1Bj ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.ufzgXR6MLz ++ cat /tmp/tmp.Hr7gFQj1Bj ++ rm /tmp/tmp.ufzgXR6MLz /tmp/tmp.Hr7gFQj1Bj ++ return 0 + local client_container=psmdb-client-6cd48df8b6-vv5md + local 'mongo_flag=--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' + kubectl_bin exec psmdb-client-6cd48df8b6-vv5md -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100600 })\n'\'' | mongo mongodb://myApp:myPass@monitoring-mongos.monitoring-2-0-21005.svc.cluster.local/admin --tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' ++ mktemp + local LAST_OUT=/tmp/tmp.31TerTsYVY ++ mktemp + local LAST_ERR=/tmp/tmp.CF5DUmsc8z + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-6cd48df8b6-vv5md -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100600 })\n'\'' | mongo mongodb://myApp:myPass@monitoring-mongos.monitoring-2-0-21005.svc.cluster.local/admin --tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.31TerTsYVY Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://monitoring-mongos.monitoring-2-0-21005.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb {"t":{"$date":"2024-06-26T08:45:03.776Z"},"s":"I", "c":"NETWORK", "id":5490002, "ctx":"thread1","msg":"Started a new thread for the timer service"} Implicit session: session { "id" : UUID("04c26db2-4511-4b37-a698-e0183454ae75") } Percona Server for MongoDB server version: v7.0.11-6 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.CF5DUmsc8z + rm /tmp/tmp.31TerTsYVY /tmp/tmp.CF5DUmsc8z + return 0 + insert_data_mongos 100700 myApp '--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' + local data=100700 + local db_name=myApp + local 'flags=--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' + run_mongos 'use myApp\n db.test.insert({ x: 100700 })' myApp:myPass@monitoring-mongos.monitoring-2-0-21005 '' '' '--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' + local 'command=use myApp\n db.test.insert({ x: 100700 })' + local uri=myApp:myPass@monitoring-mongos.monitoring-2-0-21005 + local driver=mongodb + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.png9p83f0k +++ mktemp ++ local LAST_ERR=/tmp/tmp.KzZlD4qR3i ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.png9p83f0k ++ cat /tmp/tmp.KzZlD4qR3i ++ rm /tmp/tmp.png9p83f0k /tmp/tmp.KzZlD4qR3i ++ return 0 + local client_container=psmdb-client-6cd48df8b6-vv5md + local 'mongo_flag=--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' + kubectl_bin exec psmdb-client-6cd48df8b6-vv5md -- bash -c 'printf 
'\''use myApp\n db.test.insert({ x: 100700 })\n'\'' | mongo mongodb://myApp:myPass@monitoring-mongos.monitoring-2-0-21005.svc.cluster.local/admin --tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' ++ mktemp + local LAST_OUT=/tmp/tmp.sBgrq3IVKz ++ mktemp + local LAST_ERR=/tmp/tmp.RsphXecGnG + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-6cd48df8b6-vv5md -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100700 })\n'\'' | mongo mongodb://myApp:myPass@monitoring-mongos.monitoring-2-0-21005.svc.cluster.local/admin --tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.sBgrq3IVKz Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://monitoring-mongos.monitoring-2-0-21005.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb {"t":{"$date":"2024-06-26T08:45:06.634Z"},"s":"I", "c":"NETWORK", "id":5490002, "ctx":"thread1","msg":"Started a new thread for the timer service"} Implicit session: session { "id" : UUID("672559a6-f196-4e8c-bb8e-79bcafaa02bb") } Percona Server for MongoDB server version: v7.0.11-6 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.RsphXecGnG + rm /tmp/tmp.sBgrq3IVKz /tmp/tmp.RsphXecGnG + return 0 + desc 'add PMM_SERVER_API_KEY for secret some-users' + set +o xtrace ----------------------------------------------------------------------------------- add PMM_SERVER_API_KEY for secret some-users ----------------------------------------------------------------------------------- ++ jq .key +++ get_service_endpoint monitoring-service +++ local service=monitoring-service ++++ kubectl_bin get service/monitoring-service -o json ++++ jq '.status.loadBalancer.ingress[].hostname' ++++ sed -e 's/^"//; s/"$//;' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.QlbGpERksF +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.QNXpXADo3v ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ set +e ++++ kubectl get service/monitoring-service -o json ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.QlbGpERksF ++++ cat /tmp/tmp.QNXpXADo3v ++++ rm /tmp/tmp.QlbGpERksF /tmp/tmp.QNXpXADo3v ++++ return 0 +++ local hostname=null +++ '[' -n null -a null '!=' null ']' ++++ kubectl_bin get service/monitoring-service -o json ++++ jq '.status.loadBalancer.ingress[].ip' ++++ sed -e 's/^"//; s/"$//;' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.QoULSqsI3S +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.akjX7l2aeo ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ set +e ++++ kubectl get service/monitoring-service -o json ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.QoULSqsI3S ++++ cat /tmp/tmp.akjX7l2aeo ++++ rm /tmp/tmp.QoULSqsI3S /tmp/tmp.akjX7l2aeo ++++ return 0 +++ local ip=34.46.27.114 +++ '[' -n 34.46.27.114 -a 34.46.27.114 '!=' null ']' +++ echo 34.46.27.114 +++ return ++ curl --insecure -X POST -H 'Content-Type: application/json' -d '{"name":"operator", "role": "Admin"}' https://admin:admin@34.46.27.114/graph/api/auth/keys % Total % Received % Xferd Average Speed Time Time Time Current Dload Upload Total Spent Left Speed 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0 100 155 100 119 100 36 254 77 --:--:-- --:--:-- --:--:-- 332 + 
API_KEY='"eyJrIjoiQks2RGRZdUpvZVR0QWI4aE5OcHZ2blBnaHBYQkxZVGUiLCJuIjoib3BlcmF0b3IiLCJpZCI6MX0="' + kubectl_bin patch secret some-users --type merge --patch '{"stringData": {"PMM_SERVER_API_KEY": "eyJrIjoiQks2RGRZdUpvZVR0QWI4aE5OcHZ2blBnaHBYQkxZVGUiLCJuIjoib3BlcmF0b3IiLCJpZCI6MX0="}}' ++ mktemp + local LAST_OUT=/tmp/tmp.uLrOCd4MTu ++ mktemp + local LAST_ERR=/tmp/tmp.KhxDkvlosK + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl patch secret some-users --type merge --patch '{"stringData": {"PMM_SERVER_API_KEY": "eyJrIjoiQks2RGRZdUpvZVR0QWI4aE5OcHZ2blBnaHBYQkxZVGUiLCJuIjoib3BlcmF0b3IiLCJpZCI6MX0="}}' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.uLrOCd4MTu secret/some-users patched + cat /tmp/tmp.KhxDkvlosK + rm /tmp/tmp.uLrOCd4MTu /tmp/tmp.KhxDkvlosK + return 0 + desc 'check if all 3 Pods started' + set +o xtrace ----------------------------------------------------------------------------------- check if all 3 Pods started ----------------------------------------------------------------------------------- + wait_for_running monitoring-rs0 3 + local name=monitoring-rs0 + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=monitoring ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + [[ 0 -eq 2 ]] + wait_pod monitoring-rs0-0 + local pod=monitoring-rs0-0 + set +o xtrace waiting for pod/monitoring-rs0-0 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 1 -eq 2 ]] + wait_pod monitoring-rs0-1 + local pod=monitoring-rs0-1 + set +o xtrace waiting for pod/monitoring-rs0-1 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb monitoring -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.9mgV2ocvQK +++ mktemp ++ local LAST_ERR=/tmp/tmp.V5aYHUVoiD ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb monitoring -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.9mgV2ocvQK ++ cat /tmp/tmp.V5aYHUVoiD ++ rm /tmp/tmp.9mgV2ocvQK /tmp/tmp.V5aYHUVoiD ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod monitoring-rs0-2 + local pod=monitoring-rs0-2 + set +o xtrace waiting for pod/monitoring-rs0-2 to be ready.OK ++ kubectl_bin get psmdb monitoring -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.1rz4AIy3Fb +++ mktemp ++ local LAST_ERR=/tmp/tmp.bRyoCGac1E ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb monitoring -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.1rz4AIy3Fb ++ cat /tmp/tmp.bRyoCGac1E ++ rm /tmp/tmp.1rz4AIy3Fb /tmp/tmp.bRyoCGac1E ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness......................................................................................... 
+ sleep 90 + desc 'check if pmm-client container enabled' + set +o xtrace ----------------------------------------------------------------------------------- check if pmm-client container enabled ----------------------------------------------------------------------------------- + compare_kubectl statefulset/monitoring-rs0 + local resource=statefulset/monitoring-rs0 + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1578/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-rs0.yml + local new_result=/tmp/tmp.D0vv3Eolup/statefulset_monitoring-rs0.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1578/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-rs0-oc.yml ']' + kubectl_bin get -o yaml statefulset/monitoring-rs0 + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("monitoring-2-0-21005", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. 
== "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.p2nlVmB4V1 ++ mktemp + local LAST_ERR=/tmp/tmp.VoEBwY552p + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get -o yaml statefulset/monitoring-rs0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.p2nlVmB4V1 + cat /tmp/tmp.VoEBwY552p + rm /tmp/tmp.p2nlVmB4V1 /tmp/tmp.VoEBwY552p + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.D0vv3Eolup/statefulset_monitoring-rs0.yml + version_gt 1.22 ++ echo '1.26 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.D0vv3Eolup/statefulset_monitoring-rs0.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.D0vv3Eolup/statefulset_monitoring-rs0.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1578/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-rs0.yml == */cronjob* ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1578/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-rs0.yml /tmp/tmp.D0vv3Eolup/statefulset_monitoring-rs0.yml + compare_kubectl service/monitoring-rs0 + local resource=service/monitoring-rs0 + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1578/e2e-tests/monitoring-2-0/compare/service_monitoring-rs0.yml + local new_result=/tmp/tmp.D0vv3Eolup/service_monitoring-rs0.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1578/e2e-tests/monitoring-2-0/compare/service_monitoring-rs0-oc.yml ']' + kubectl_bin get -o yaml service/monitoring-rs0 ++ mktemp + local LAST_OUT=/tmp/tmp.WwjFJFV0B0 ++ mktemp + local LAST_ERR=/tmp/tmp.86jIEUQyC2 + local exit_status=0 + local timeout=4 ++ seq 0 2 + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. 
| select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("monitoring-2-0-21005", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. == "batch/v1beta1")) = "batch/v1" ' - + for i in '$(seq 0 2)' + set +e + kubectl get -o yaml service/monitoring-rs0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.WwjFJFV0B0 + cat /tmp/tmp.86jIEUQyC2 + rm /tmp/tmp.WwjFJFV0B0 /tmp/tmp.86jIEUQyC2 + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.D0vv3Eolup/service_monitoring-rs0.yml + version_gt 1.22 ++ echo '1.26 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.D0vv3Eolup/service_monitoring-rs0.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.D0vv3Eolup/service_monitoring-rs0.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1578/e2e-tests/monitoring-2-0/compare/service_monitoring-rs0.yml == */cronjob* ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1578/e2e-tests/monitoring-2-0/compare/service_monitoring-rs0.yml /tmp/tmp.D0vv3Eolup/service_monitoring-rs0.yml + compare_kubectl service/monitoring-mongos + local resource=service/monitoring-mongos + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1578/e2e-tests/monitoring-2-0/compare/service_monitoring-mongos.yml + local new_result=/tmp/tmp.D0vv3Eolup/service_monitoring-mongos.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1578/e2e-tests/monitoring-2-0/compare/service_monitoring-mongos-oc.yml ']' + kubectl_bin get -o yaml service/monitoring-mongos + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. 
| select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("monitoring-2-0-21005", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. == "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.S6oywAWDbX ++ mktemp + local LAST_ERR=/tmp/tmp.UD0zVCQDqb + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get -o yaml service/monitoring-mongos + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.S6oywAWDbX + cat /tmp/tmp.UD0zVCQDqb + rm /tmp/tmp.S6oywAWDbX /tmp/tmp.UD0zVCQDqb + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.D0vv3Eolup/service_monitoring-mongos.yml + version_gt 1.22 ++ echo '1.26 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.D0vv3Eolup/service_monitoring-mongos.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.D0vv3Eolup/service_monitoring-mongos.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1578/e2e-tests/monitoring-2-0/compare/service_monitoring-mongos.yml == */cronjob* ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1578/e2e-tests/monitoring-2-0/compare/service_monitoring-mongos.yml /tmp/tmp.D0vv3Eolup/service_monitoring-mongos.yml + compare_kubectl statefulset/monitoring-cfg + local resource=statefulset/monitoring-cfg + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1578/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-cfg.yml + local new_result=/tmp/tmp.D0vv3Eolup/statefulset_monitoring-cfg.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1578/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-cfg-oc.yml ']' + kubectl_bin get -o yaml statefulset/monitoring-cfg ++ mktemp + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. 
| select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("monitoring-2-0-21005", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. == "batch/v1beta1")) = "batch/v1" ' - + local LAST_OUT=/tmp/tmp.AyxasejMgh ++ mktemp + local LAST_ERR=/tmp/tmp.9cEaXiPzzW + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get -o yaml statefulset/monitoring-cfg + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.AyxasejMgh + cat /tmp/tmp.9cEaXiPzzW + rm /tmp/tmp.AyxasejMgh /tmp/tmp.9cEaXiPzzW + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.D0vv3Eolup/statefulset_monitoring-cfg.yml + version_gt 1.22 ++ echo '1.26 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.D0vv3Eolup/statefulset_monitoring-cfg.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.D0vv3Eolup/statefulset_monitoring-cfg.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1578/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-cfg.yml == */cronjob* ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1578/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-cfg.yml /tmp/tmp.D0vv3Eolup/statefulset_monitoring-cfg.yml + compare_kubectl statefulset/monitoring-mongos + local resource=statefulset/monitoring-mongos + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1578/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-mongos.yml + local new_result=/tmp/tmp.D0vv3Eolup/statefulset_monitoring-mongos.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1578/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-mongos-oc.yml ']' + kubectl_bin get -o yaml statefulset/monitoring-mongos ++ mktemp + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. 
| select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("monitoring-2-0-21005", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. == "batch/v1beta1")) = "batch/v1" ' - + local LAST_OUT=/tmp/tmp.PpsyNxUahb ++ mktemp + local LAST_ERR=/tmp/tmp.o15p90nCEj + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get -o yaml statefulset/monitoring-mongos + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.PpsyNxUahb + cat /tmp/tmp.o15p90nCEj + rm /tmp/tmp.PpsyNxUahb /tmp/tmp.o15p90nCEj + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.D0vv3Eolup/statefulset_monitoring-mongos.yml + version_gt 1.22 ++ echo '1.26 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.D0vv3Eolup/statefulset_monitoring-mongos.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.D0vv3Eolup/statefulset_monitoring-mongos.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1578/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-mongos.yml == */cronjob* ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1578/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-mongos.yml /tmp/tmp.D0vv3Eolup/statefulset_monitoring-mongos.yml + desc 'check mongod metrics' + set +o xtrace ----------------------------------------------------------------------------------- check mongod metrics ----------------------------------------------------------------------------------- + get_metric_values node_boot_time_seconds monitoring-2-0-21005-monitoring-rs0-1 admin:admin + local metric=node_boot_time_seconds + local instance=monitoring-2-0-21005-monitoring-rs0-1 + local user_pass=admin:admin ++ /usr/bin/date -u +%s -d '-1 minute' + local start=1719391749 ++ /usr/bin/date -u +%s + local end=1719391809 ++ get_service_endpoint monitoring-service ++ local service=monitoring-service +++ kubectl_bin get service/monitoring-service -o json +++ jq '.status.loadBalancer.ingress[].hostname' +++ sed -e 's/^"//; s/"$//;' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.0IpJyAwYks ++++ mktemp +++ local LAST_ERR=/tmp/tmp.VxCDyqWY4w +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.0IpJyAwYks +++ cat /tmp/tmp.VxCDyqWY4w +++ rm 
/tmp/tmp.0IpJyAwYks /tmp/tmp.VxCDyqWY4w +++ return 0 ++ local hostname=null ++ '[' -n null -a null '!=' null ']' +++ kubectl_bin get service/monitoring-service -o json +++ jq '.status.loadBalancer.ingress[].ip' +++ sed -e 's/^"//; s/"$//;' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.ghXvNvl5sp ++++ mktemp +++ local LAST_ERR=/tmp/tmp.l50z5FWgrK +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.ghXvNvl5sp +++ cat /tmp/tmp.l50z5FWgrK +++ rm /tmp/tmp.ghXvNvl5sp /tmp/tmp.l50z5FWgrK +++ return 0 ++ local ip=34.46.27.114 ++ '[' -n 34.46.27.114 -a 34.46.27.114 '!=' null ']' ++ echo 34.46.27.114 ++ return + local endpoint=34.46.27.114 + grep '^"[0-9]' + jq '.data.result[0].values[][1]' + curl -s -k 'https://admin:admin@34.46.27.114/graph/api/datasources/proxy/1/api/v1/query_range?query=min%28node_boot_time_seconds%7Bnode_name%3D%7E%22monitoring-2-0-21005-monitoring-rs0-1%22%7d%20or%20node_boot_time_seconds%7Bnode_name%3D%7E%22monitoring-2-0-21005-monitoring-rs0-1%22%7D%29&start=1719391749&end=1719391809&step=60' "1719388087" "1719388087" + get_metric_values mongodb_connections monitoring-2-0-21005-monitoring-rs0-1 admin:admin + local metric=mongodb_connections + local instance=monitoring-2-0-21005-monitoring-rs0-1 + local user_pass=admin:admin ++ /usr/bin/date -u +%s -d '-1 minute' + local start=1719391752 ++ /usr/bin/date -u +%s + local end=1719391812 ++ get_service_endpoint monitoring-service ++ local service=monitoring-service +++ jq '.status.loadBalancer.ingress[].hostname' +++ sed -e 's/^"//; s/"$//;' +++ kubectl_bin get service/monitoring-service -o json ++++ mktemp +++ local LAST_OUT=/tmp/tmp.xWojqphLwF ++++ mktemp +++ local LAST_ERR=/tmp/tmp.fEFTRLO9tO +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.xWojqphLwF +++ cat /tmp/tmp.fEFTRLO9tO +++ rm /tmp/tmp.xWojqphLwF /tmp/tmp.fEFTRLO9tO +++ return 0 ++ local hostname=null ++ '[' -n null -a null '!=' null ']' +++ sed -e 's/^"//; s/"$//;' +++ jq '.status.loadBalancer.ingress[].ip' +++ kubectl_bin get service/monitoring-service -o json ++++ mktemp +++ local LAST_OUT=/tmp/tmp.vxlkRVr2eC ++++ mktemp +++ local LAST_ERR=/tmp/tmp.BrxSDLzTS6 +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.vxlkRVr2eC +++ cat /tmp/tmp.BrxSDLzTS6 +++ rm /tmp/tmp.vxlkRVr2eC /tmp/tmp.BrxSDLzTS6 +++ return 0 ++ local ip=34.46.27.114 ++ '[' -n 34.46.27.114 -a 34.46.27.114 '!=' null ']' ++ echo 34.46.27.114 ++ return + local endpoint=34.46.27.114 + curl -s -k 'https://admin:admin@34.46.27.114/graph/api/datasources/proxy/1/api/v1/query_range?query=min%28mongodb_connections%7Bnode_name%3D%7E%22monitoring-2-0-21005-monitoring-rs0-1%22%7d%20or%20mongodb_connections%7Bnode_name%3D%7E%22monitoring-2-0-21005-monitoring-rs0-1%22%7D%29&start=1719391752&end=1719391812&step=60' + grep '^"[0-9]' + jq '.data.result[0].values[][1]' "0" "0" + desc 'check mongo config metrics' + set +o xtrace ----------------------------------------------------------------------------------- check mongo config metrics 
----------------------------------------------------------------------------------- + get_metric_values node_boot_time_seconds monitoring-2-0-21005-monitoring-cfg-1 admin:admin + local metric=node_boot_time_seconds + local instance=monitoring-2-0-21005-monitoring-cfg-1 + local user_pass=admin:admin ++ /usr/bin/date -u +%s -d '-1 minute' + local start=1719391755 ++ /usr/bin/date -u +%s + local end=1719391815 ++ get_service_endpoint monitoring-service ++ local service=monitoring-service +++ kubectl_bin get service/monitoring-service -o json +++ sed -e 's/^"//; s/"$//;' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.moIvoeeFHn ++++ mktemp +++ local LAST_ERR=/tmp/tmp.UN1VddHECz +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get service/monitoring-service -o json +++ jq '.status.loadBalancer.ingress[].hostname' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.moIvoeeFHn +++ cat /tmp/tmp.UN1VddHECz +++ rm /tmp/tmp.moIvoeeFHn /tmp/tmp.UN1VddHECz +++ return 0 ++ local hostname=null ++ '[' -n null -a null '!=' null ']' +++ kubectl_bin get service/monitoring-service -o json +++ jq '.status.loadBalancer.ingress[].ip' +++ sed -e 's/^"//; s/"$//;' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.sMOiNCU97z ++++ mktemp +++ local LAST_ERR=/tmp/tmp.ja01wHryIW +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.sMOiNCU97z +++ cat /tmp/tmp.ja01wHryIW +++ rm /tmp/tmp.sMOiNCU97z /tmp/tmp.ja01wHryIW +++ return 0 ++ local ip=34.46.27.114 ++ '[' -n 34.46.27.114 -a 34.46.27.114 '!=' null ']' ++ echo 34.46.27.114 ++ return + local endpoint=34.46.27.114 + jq '.data.result[0].values[][1]' + grep '^"[0-9]' + curl -s -k 'https://admin:admin@34.46.27.114/graph/api/datasources/proxy/1/api/v1/query_range?query=min%28node_boot_time_seconds%7Bnode_name%3D%7E%22monitoring-2-0-21005-monitoring-cfg-1%22%7d%20or%20node_boot_time_seconds%7Bnode_name%3D%7E%22monitoring-2-0-21005-monitoring-cfg-1%22%7D%29&start=1719391755&end=1719391815&step=60' "1719388087" "1719388087" + get_metric_values mongodb_connections monitoring-2-0-21005-monitoring-cfg-1 admin:admin + local metric=mongodb_connections + local instance=monitoring-2-0-21005-monitoring-cfg-1 + local user_pass=admin:admin ++ /usr/bin/date -u +%s -d '-1 minute' + local start=1719391758 ++ /usr/bin/date -u +%s + local end=1719391818 ++ get_service_endpoint monitoring-service ++ local service=monitoring-service +++ sed -e 's/^"//; s/"$//;' +++ kubectl_bin get service/monitoring-service -o json +++ jq '.status.loadBalancer.ingress[].hostname' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.Q4UIJhMT4x ++++ mktemp +++ local LAST_ERR=/tmp/tmp.6pPOgwvJon +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.Q4UIJhMT4x +++ cat /tmp/tmp.6pPOgwvJon +++ rm /tmp/tmp.Q4UIJhMT4x /tmp/tmp.6pPOgwvJon +++ return 0 ++ local hostname=null ++ '[' -n null -a null '!=' null ']' +++ kubectl_bin get service/monitoring-service -o json ++++ mktemp +++ jq '.status.loadBalancer.ingress[].ip' +++ local LAST_OUT=/tmp/tmp.Dd7QQYO5Kp +++ sed -e 's/^"//; s/"$//;' ++++ mktemp +++ local LAST_ERR=/tmp/tmp.3sRzR6IUQA +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 
2)' +++ set +e +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.Dd7QQYO5Kp +++ cat /tmp/tmp.3sRzR6IUQA +++ rm /tmp/tmp.Dd7QQYO5Kp /tmp/tmp.3sRzR6IUQA +++ return 0 ++ local ip=34.46.27.114 ++ '[' -n 34.46.27.114 -a 34.46.27.114 '!=' null ']' ++ echo 34.46.27.114 ++ return + local endpoint=34.46.27.114 + curl -s -k 'https://admin:admin@34.46.27.114/graph/api/datasources/proxy/1/api/v1/query_range?query=min%28mongodb_connections%7Bnode_name%3D%7E%22monitoring-2-0-21005-monitoring-cfg-1%22%7d%20or%20mongodb_connections%7Bnode_name%3D%7E%22monitoring-2-0-21005-monitoring-cfg-1%22%7D%29&start=1719391758&end=1719391818&step=60' + grep '^"[0-9]' + jq '.data.result[0].values[][1]' "0" "0" + desc 'check mongos metrics' + set +o xtrace ----------------------------------------------------------------------------------- check mongos metrics ----------------------------------------------------------------------------------- ++ kubectl get pod -l app.kubernetes.io/component=mongos -o 'jsonpath={.items[0].metadata.name}' + MONGOS_POD_NAME=monitoring-mongos-0 + get_metric_values node_boot_time_seconds monitoring-2-0-21005-monitoring-mongos-0 admin:admin + local metric=node_boot_time_seconds + local instance=monitoring-2-0-21005-monitoring-mongos-0 + local user_pass=admin:admin ++ /usr/bin/date -u +%s -d '-1 minute' + local start=1719391763 ++ /usr/bin/date -u +%s + local end=1719391823 ++ get_service_endpoint monitoring-service ++ local service=monitoring-service +++ kubectl_bin get service/monitoring-service -o json +++ jq '.status.loadBalancer.ingress[].hostname' +++ sed -e 's/^"//; s/"$//;' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.VHyYUSyvt6 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.UNMsoMDs4C +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.VHyYUSyvt6 +++ cat /tmp/tmp.UNMsoMDs4C +++ rm /tmp/tmp.VHyYUSyvt6 /tmp/tmp.UNMsoMDs4C +++ return 0 ++ local hostname=null ++ '[' -n null -a null '!=' null ']' +++ kubectl_bin get service/monitoring-service -o json +++ sed -e 's/^"//; s/"$//;' +++ jq '.status.loadBalancer.ingress[].ip' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.V7bowHnZne ++++ mktemp +++ local LAST_ERR=/tmp/tmp.XbKNoYblwQ +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.V7bowHnZne +++ cat /tmp/tmp.XbKNoYblwQ +++ rm /tmp/tmp.V7bowHnZne /tmp/tmp.XbKNoYblwQ +++ return 0 ++ local ip=34.46.27.114 ++ '[' -n 34.46.27.114 -a 34.46.27.114 '!=' null ']' ++ echo 34.46.27.114 ++ return + local endpoint=34.46.27.114 + jq '.data.result[0].values[][1]' + grep '^"[0-9]' + curl -s -k 'https://admin:admin@34.46.27.114/graph/api/datasources/proxy/1/api/v1/query_range?query=min%28node_boot_time_seconds%7Bnode_name%3D%7E%22monitoring-2-0-21005-monitoring-mongos-0%22%7d%20or%20node_boot_time_seconds%7Bnode_name%3D%7E%22monitoring-2-0-21005-monitoring-mongos-0%22%7D%29&start=1719391763&end=1719391823&step=60' "1719388087" "1719388087" + sleep 90 + desc 'check QAN data' + set +o xtrace ----------------------------------------------------------------------------------- check QAN data ----------------------------------------------------------------------------------- + get_qan_values mongodb 
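-----------------------------------------------------------------------------------
editor's sketch: the get_metric_values probe used for the checks above
-----------------------------------------------------------------------------------
Each metric check above queries the PMM server's Prometheus proxy for a
one-minute query_range of min(<metric>{node_name=~"<instance>"}) and passes when
numeric samples come back (e.g. "1719388087" for node_boot_time_seconds). The
trace builds the percent-encoded URL by hand and duplicates the selector with an
`or`; the sketch below gets the same effect with curl -G --data-urlencode:

get_metric_values() {
    local metric="$1" instance="$2" user_pass="$3"
    local start end endpoint
    start=$(date -u +%s -d '-1 minute')
    end=$(date -u +%s)
    endpoint=$(get_service_endpoint monitoring-service)   # sketched earlier
    curl -s -k -G \
        "https://${user_pass}@${endpoint}/graph/api/datasources/proxy/1/api/v1/query_range" \
        --data-urlencode "query=min(${metric}{node_name=~\"${instance}\"})" \
        --data-urlencode "start=${start}" \
        --data-urlencode "end=${end}" \
        --data-urlencode "step=60" \
        | jq '.data.result[0].values[][1]' \
        | grep '^"[0-9]'        # at least one sample means the exporter reports
}
-----------------------------------------------------------------------------------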
dev-mongod admin:admin + local service_type=mongodb + local environment=dev-mongod + local user_pass=admin:admin + local start + local end + local endpoint ++ /usr/bin/date -u -d '-12 hour' +%Y-%m-%dT%H:%M:%S%:z + start=2024-06-25T20:51:56+00:00 ++ /usr/bin/date -u +%Y-%m-%dT%H:%M:%S%:z + end=2024-06-26T08:51:56+00:00 ++ get_service_endpoint monitoring-service ++ local service=monitoring-service +++ kubectl_bin get service/monitoring-service -o json +++ jq '.status.loadBalancer.ingress[].hostname' +++ sed -e 's/^"//; s/"$//;' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.GjraTFDrkm ++++ mktemp +++ local LAST_ERR=/tmp/tmp.XUo9GlAFKS +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.GjraTFDrkm +++ cat /tmp/tmp.XUo9GlAFKS +++ rm /tmp/tmp.GjraTFDrkm /tmp/tmp.XUo9GlAFKS +++ return 0 ++ local hostname=null ++ '[' -n null -a null '!=' null ']' +++ kubectl_bin get service/monitoring-service -o json +++ jq '.status.loadBalancer.ingress[].ip' ++++ mktemp +++ sed -e 's/^"//; s/"$//;' +++ local LAST_OUT=/tmp/tmp.ewLzhm66kl ++++ mktemp +++ local LAST_ERR=/tmp/tmp.Z7ow7dMWK5 +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.ewLzhm66kl +++ cat /tmp/tmp.Z7ow7dMWK5 +++ rm /tmp/tmp.ewLzhm66kl /tmp/tmp.Z7ow7dMWK5 +++ return 0 ++ local ip=34.46.27.114 ++ '[' -n 34.46.27.114 -a 34.46.27.114 '!=' null ']' ++ echo 34.46.27.114 ++ return + endpoint=34.46.27.114 + cat + local response ++ curl -s -k -XPOST -d @payload.json https://admin:admin@34.46.27.114/v0/qan/GetReport ++ jq '.rows[].fingerprint' + response='"TOTAL" "FIND version" "DBSTATS clusterTime,dbStats,hash,id,keyId,lsid,mode,scale,signature" "FIND system.version _id" "FIND oplog.rs"' + rm -f payload.json + [[ "TOTAL" "FIND version" "DBSTATS clusterTime,dbStats,hash,id,keyId,lsid,mode,scale,signature" "FIND system.version _id" "FIND oplog.rs" == \n\u\l\l ]] + get_qan_values mongodb dev-mongos admin:admin + local service_type=mongodb + local environment=dev-mongos + local user_pass=admin:admin + local start + local end + local endpoint ++ /usr/bin/date -u -d '-12 hour' +%Y-%m-%dT%H:%M:%S%:z + start=2024-06-25T20:51:59+00:00 ++ /usr/bin/date -u +%Y-%m-%dT%H:%M:%S%:z + end=2024-06-26T08:51:59+00:00 ++ get_service_endpoint monitoring-service ++ local service=monitoring-service +++ kubectl_bin get service/monitoring-service -o json +++ sed -e 's/^"//; s/"$//;' +++ jq '.status.loadBalancer.ingress[].hostname' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.2xfsWu0WTD ++++ mktemp +++ local LAST_ERR=/tmp/tmp.wZt68Fnpmu +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.2xfsWu0WTD +++ cat /tmp/tmp.wZt68Fnpmu +++ rm /tmp/tmp.2xfsWu0WTD /tmp/tmp.wZt68Fnpmu +++ return 0 ++ local hostname=null ++ '[' -n null -a null '!=' null ']' +++ kubectl_bin get service/monitoring-service -o json +++ sed -e 's/^"//; s/"$//;' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.251ZYqOaGZ +++ jq '.status.loadBalancer.ingress[].ip' ++++ mktemp +++ local LAST_ERR=/tmp/tmp.EtBifpSWXy +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ 
kubectl get service/monitoring-service -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.251ZYqOaGZ +++ cat /tmp/tmp.EtBifpSWXy +++ rm /tmp/tmp.251ZYqOaGZ /tmp/tmp.EtBifpSWXy +++ return 0 ++ local ip=34.46.27.114 ++ '[' -n 34.46.27.114 -a 34.46.27.114 '!=' null ']' ++ echo 34.46.27.114 ++ return + endpoint=34.46.27.114 + cat + local response ++ curl -s -k -XPOST -d @payload.json https://admin:admin@34.46.27.114/v0/qan/GetReport ++ jq '.rows[].fingerprint' + response='"TOTAL" "DBSTATS application,architecture,client,clusterTime,db,dbStats,driver,hash,host,id,keyId,level,lsid,mayBypassWriteBlocking,mongos,name,os,platform,provenance,readConcern,role,scale,signature,type,uid,user,version"' + rm -f payload.json + [[ "TOTAL" "DBSTATS application,architecture,client,clusterTime,db,dbStats,driver,hash,host,id,keyId,level,lsid,mayBypassWriteBlocking,mongos,name,os,platform,provenance,readConcern,role,scale,signature,type,uid,user,version" == \n\u\l\l ]] + nodeList=($(get_node_id_from_pmm)) ++ get_node_id_from_pmm ++ nodeList=() ++ local -a nodeList +++ kubectl_bin get pods --no-headers -l app.kubernetes.io/name=percona-server-mongodb --output=custom-columns=NAME:.metadata.name ++++ mktemp +++ local LAST_OUT=/tmp/tmp.m3ocN3kcst ++++ mktemp +++ local LAST_ERR=/tmp/tmp.dDUprdlmvx +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get pods --no-headers -l app.kubernetes.io/name=percona-server-mongodb --output=custom-columns=NAME:.metadata.name +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.m3ocN3kcst +++ cat /tmp/tmp.dDUprdlmvx +++ rm /tmp/tmp.m3ocN3kcst /tmp/tmp.dDUprdlmvx +++ return 0 ++ for instance in '$(kubectl_bin get pods --no-headers -l app.kubernetes.io/name=percona-server-mongodb --output=custom-columns='\''NAME:.metadata.name'\'')' ++ nodeList+=($(kubectl_bin exec -n "$namespace" $instance -c pmm-client -- pmm-admin status --json | jq -r '.pmm_agent_status.node_id')) +++ kubectl_bin exec -n monitoring-2-0-21005 monitoring-cfg-0 -c pmm-client -- pmm-admin status --json +++ jq -r .pmm_agent_status.node_id ++++ mktemp +++ local LAST_OUT=/tmp/tmp.hsuv46RacU ++++ mktemp +++ local LAST_ERR=/tmp/tmp.g5H5UuJGcy +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-21005 monitoring-cfg-0 -c pmm-client -- pmm-admin status --json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.hsuv46RacU +++ cat /tmp/tmp.g5H5UuJGcy +++ rm /tmp/tmp.hsuv46RacU /tmp/tmp.g5H5UuJGcy +++ return 0 ++ for instance in '$(kubectl_bin get pods --no-headers -l app.kubernetes.io/name=percona-server-mongodb --output=custom-columns='\''NAME:.metadata.name'\'')' ++ nodeList+=($(kubectl_bin exec -n "$namespace" $instance -c pmm-client -- pmm-admin status --json | jq -r '.pmm_agent_status.node_id')) +++ kubectl_bin exec -n monitoring-2-0-21005 monitoring-cfg-1 -c pmm-client -- pmm-admin status --json +++ jq -r .pmm_agent_status.node_id ++++ mktemp +++ local LAST_OUT=/tmp/tmp.pqkhKGSnv4 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.R6GSdSN0sI +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-21005 monitoring-cfg-1 -c pmm-client -- pmm-admin status --json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.pqkhKGSnv4 +++ cat /tmp/tmp.R6GSdSN0sI +++ rm /tmp/tmp.pqkhKGSnv4 /tmp/tmp.R6GSdSN0sI 
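-----------------------------------------------------------------------------------
editor's sketch: the get_qan_values probe (QAN checks above)
-----------------------------------------------------------------------------------
The QAN checks above POST a 12-hour window and an environment filter (dev-mongod,
then dev-mongos) to the PMM endpoint /v0/qan/GetReport and assert the returned
query fingerprints are not null. The payload.json is written by a heredoc that
xtrace does not echo, so the field names in this sketch are assumptions; only the
window, the environment labels, and the curl/jq handling come from the trace:

get_qan_values() {
    local environment="$1" user_pass="$2"
    local start end endpoint response
    start=$(date -u -d '-12 hour' +%Y-%m-%dT%H:%M:%S%:z)
    end=$(date -u +%Y-%m-%dT%H:%M:%S%:z)
    endpoint=$(get_service_endpoint monitoring-service)
    cat >payload.json <<EOF
{
  "period_start_from": "$start",
  "period_start_to": "$end",
  "labels": [{"key": "environment", "value": ["$environment"]}]
}
EOF
    response=$(curl -s -k -XPOST -d @payload.json \
        "https://${user_pass}@${endpoint}/v0/qan/GetReport" | jq '.rows[].fingerprint')
    rm -f payload.json
    # "TOTAL", "FIND version", ... in the output means QAN received queries.
    [[ $response != null && -n $response ]]
}
-----------------------------------------------------------------------------------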
+++ return 0 ++ for instance in '$(kubectl_bin get pods --no-headers -l app.kubernetes.io/name=percona-server-mongodb --output=custom-columns='\''NAME:.metadata.name'\'')' ++ nodeList+=($(kubectl_bin exec -n "$namespace" $instance -c pmm-client -- pmm-admin status --json | jq -r '.pmm_agent_status.node_id')) +++ kubectl_bin exec -n monitoring-2-0-21005 monitoring-cfg-2 -c pmm-client -- pmm-admin status --json +++ jq -r .pmm_agent_status.node_id ++++ mktemp +++ local LAST_OUT=/tmp/tmp.gegA69nX97 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.xCQZRjo751 +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-21005 monitoring-cfg-2 -c pmm-client -- pmm-admin status --json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.gegA69nX97 +++ cat /tmp/tmp.xCQZRjo751 +++ rm /tmp/tmp.gegA69nX97 /tmp/tmp.xCQZRjo751 +++ return 0 ++ for instance in '$(kubectl_bin get pods --no-headers -l app.kubernetes.io/name=percona-server-mongodb --output=custom-columns='\''NAME:.metadata.name'\'')' ++ nodeList+=($(kubectl_bin exec -n "$namespace" $instance -c pmm-client -- pmm-admin status --json | jq -r '.pmm_agent_status.node_id')) +++ kubectl_bin exec -n monitoring-2-0-21005 monitoring-mongos-0 -c pmm-client -- pmm-admin status --json +++ jq -r .pmm_agent_status.node_id ++++ mktemp +++ local LAST_OUT=/tmp/tmp.TevhGMsUVt ++++ mktemp +++ local LAST_ERR=/tmp/tmp.GhNUPIeLZY +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-21005 monitoring-mongos-0 -c pmm-client -- pmm-admin status --json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.TevhGMsUVt +++ cat /tmp/tmp.GhNUPIeLZY +++ rm /tmp/tmp.TevhGMsUVt /tmp/tmp.GhNUPIeLZY +++ return 0 ++ for instance in '$(kubectl_bin get pods --no-headers -l app.kubernetes.io/name=percona-server-mongodb --output=custom-columns='\''NAME:.metadata.name'\'')' ++ nodeList+=($(kubectl_bin exec -n "$namespace" $instance -c pmm-client -- pmm-admin status --json | jq -r '.pmm_agent_status.node_id')) +++ kubectl_bin exec -n monitoring-2-0-21005 monitoring-mongos-1 -c pmm-client -- pmm-admin status --json +++ jq -r .pmm_agent_status.node_id ++++ mktemp +++ local LAST_OUT=/tmp/tmp.E2CLj3iEJ8 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.pjocTVMPYG +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-21005 monitoring-mongos-1 -c pmm-client -- pmm-admin status --json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.E2CLj3iEJ8 +++ cat /tmp/tmp.pjocTVMPYG +++ rm /tmp/tmp.E2CLj3iEJ8 /tmp/tmp.pjocTVMPYG +++ return 0 ++ for instance in '$(kubectl_bin get pods --no-headers -l app.kubernetes.io/name=percona-server-mongodb --output=custom-columns='\''NAME:.metadata.name'\'')' ++ nodeList+=($(kubectl_bin exec -n "$namespace" $instance -c pmm-client -- pmm-admin status --json | jq -r '.pmm_agent_status.node_id')) +++ kubectl_bin exec -n monitoring-2-0-21005 monitoring-mongos-2 -c pmm-client -- pmm-admin status --json +++ jq -r .pmm_agent_status.node_id ++++ mktemp +++ local LAST_OUT=/tmp/tmp.WNZDFcXSqw ++++ mktemp +++ local LAST_ERR=/tmp/tmp.WLlDUhRp05 +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-21005 monitoring-mongos-2 -c pmm-client -- pmm-admin status --json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 
-a -n 1 ']' +++ break +++ cat /tmp/tmp.WNZDFcXSqw +++ cat /tmp/tmp.WLlDUhRp05 +++ rm /tmp/tmp.WNZDFcXSqw /tmp/tmp.WLlDUhRp05 +++ return 0 ++ for instance in '$(kubectl_bin get pods --no-headers -l app.kubernetes.io/name=percona-server-mongodb --output=custom-columns='\''NAME:.metadata.name'\'')' ++ nodeList+=($(kubectl_bin exec -n "$namespace" $instance -c pmm-client -- pmm-admin status --json | jq -r '.pmm_agent_status.node_id')) +++ kubectl_bin exec -n monitoring-2-0-21005 monitoring-rs0-0 -c pmm-client -- pmm-admin status --json +++ jq -r .pmm_agent_status.node_id ++++ mktemp +++ local LAST_OUT=/tmp/tmp.Myn9NWXyYd ++++ mktemp +++ local LAST_ERR=/tmp/tmp.GL8f7CGJpM +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-21005 monitoring-rs0-0 -c pmm-client -- pmm-admin status --json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.Myn9NWXyYd +++ cat /tmp/tmp.GL8f7CGJpM +++ rm /tmp/tmp.Myn9NWXyYd /tmp/tmp.GL8f7CGJpM +++ return 0 ++ for instance in '$(kubectl_bin get pods --no-headers -l app.kubernetes.io/name=percona-server-mongodb --output=custom-columns='\''NAME:.metadata.name'\'')' ++ nodeList+=($(kubectl_bin exec -n "$namespace" $instance -c pmm-client -- pmm-admin status --json | jq -r '.pmm_agent_status.node_id')) +++ kubectl_bin exec -n monitoring-2-0-21005 monitoring-rs0-1 -c pmm-client -- pmm-admin status --json +++ jq -r .pmm_agent_status.node_id ++++ mktemp +++ local LAST_OUT=/tmp/tmp.Jc17Li1G98 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.S6Tc0haAQH +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-21005 monitoring-rs0-1 -c pmm-client -- pmm-admin status --json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.Jc17Li1G98 +++ cat /tmp/tmp.S6Tc0haAQH +++ rm /tmp/tmp.Jc17Li1G98 /tmp/tmp.S6Tc0haAQH +++ return 0 ++ for instance in '$(kubectl_bin get pods --no-headers -l app.kubernetes.io/name=percona-server-mongodb --output=custom-columns='\''NAME:.metadata.name'\'')' ++ nodeList+=($(kubectl_bin exec -n "$namespace" $instance -c pmm-client -- pmm-admin status --json | jq -r '.pmm_agent_status.node_id')) +++ kubectl_bin exec -n monitoring-2-0-21005 monitoring-rs0-2 -c pmm-client -- pmm-admin status --json +++ jq -r .pmm_agent_status.node_id ++++ mktemp +++ local LAST_OUT=/tmp/tmp.Xp6ERwnPYL ++++ mktemp +++ local LAST_ERR=/tmp/tmp.zedxIOczJk +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-21005 monitoring-rs0-2 -c pmm-client -- pmm-admin status --json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.Xp6ERwnPYL +++ cat /tmp/tmp.zedxIOczJk +++ rm /tmp/tmp.Xp6ERwnPYL /tmp/tmp.zedxIOczJk +++ return 0 ++ echo /node_id/214467b1-ca7d-435c-b172-b834ffc6fc80 /node_id/943106e0-8f24-4ec5-bbb8-fe0614df8330 /node_id/e2309140-709e-4393-b1e5-8d9311552425 /node_id/542f6cc6-db70-4f18-a4d1-4497f8ae5afe /node_id/cb3024b4-1e3e-4258-9894-04253cdc03c5 /node_id/35f4348b-4bb2-4d0b-99c0-b961756c4b93 /node_id/699d53f7-537c-4b12-b8e0-35c417632209 /node_id/955fc6e2-f087-4713-8f3c-047be05bf7e8 /node_id/4a4b9313-e33b-4cc8-bd79-a597d67a1164 + nodeList_from_pmm=($(does_node_id_exists "${nodeList[@]}")) ++ does_node_id_exists /node_id/214467b1-ca7d-435c-b172-b834ffc6fc80 /node_id/943106e0-8f24-4ec5-bbb8-fe0614df8330 /node_id/e2309140-709e-4393-b1e5-8d9311552425 
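-----------------------------------------------------------------------------------
editor's sketch: collecting agent node IDs (get_node_id_from_pmm, above)
-----------------------------------------------------------------------------------
The loop above walks every pod carrying the percona-server-mongodb label (three
cfg, three mongos, three rs0 members), execs pmm-admin status --json inside each
pmm-client sidecar, and extracts .pmm_agent_status.node_id, ending with the nine
/node_id/<uuid> values echoed above. Sketch, assuming $namespace is set as in the
trace:

get_node_id_from_pmm() {
    local -a nodeList=()
    local instance
    for instance in $(kubectl get pods --no-headers \
            -l app.kubernetes.io/name=percona-server-mongodb \
            --output=custom-columns='NAME:.metadata.name'); do
        nodeList+=("$(kubectl exec -n "$namespace" "$instance" -c pmm-client -- \
            pmm-admin status --json | jq -r '.pmm_agent_status.node_id')")
    done
    echo "${nodeList[@]}"
}
-----------------------------------------------------------------------------------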
/node_id/542f6cc6-db70-4f18-a4d1-4497f8ae5afe /node_id/cb3024b4-1e3e-4258-9894-04253cdc03c5 /node_id/35f4348b-4bb2-4d0b-99c0-b961756c4b93 /node_id/699d53f7-537c-4b12-b8e0-35c417632209 /node_id/955fc6e2-f087-4713-8f3c-047be05bf7e8 /node_id/4a4b9313-e33b-4cc8-bd79-a597d67a1164 ++ nodeList=("$@") ++ local -a nodeList ++ nodeList_from_pmm=() ++ local -a nodeList_from_pmm ++ for node_id in '"${nodeList[@]}"' ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-0 -- pmm-admin --server-url=https://admin:admin@$(get_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) ++++ get_service_ip monitoring-service ++++ local service=monitoring-service ++++ local server_type=rs0 +++++ kubectl_bin get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' ++++++ mktemp +++++ local LAST_OUT=/tmp/tmp.RhHdyljl65 ++++++ mktemp +++++ local LAST_ERR=/tmp/tmp.zMdzbOVzoN +++++ local exit_status=0 +++++ local timeout=4 ++++++ seq 0 2 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++ grep /node_id/214467b1-ca7d-435c-b172-b834ffc6fc80 +++ awk '{print $4}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.RhHdyljl65 +++++ cat /tmp/tmp.zMdzbOVzoN Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 0 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.RhHdyljl65 +++++ cat /tmp/tmp.zMdzbOVzoN Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 4 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.RhHdyljl65 +++++ cat /tmp/tmp.zMdzbOVzoN Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 8 +++++ cat /tmp/tmp.RhHdyljl65 +++++ cat /tmp/tmp.zMdzbOVzoN Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ rm /tmp/tmp.RhHdyljl65 /tmp/tmp.zMdzbOVzoN +++++ return 1 ++++ '[' '' '!=' true ']' ++++ echo -n monitoring-service.monitoring-service-rs0 ++++ return +++ kubectl_bin exec -n monitoring-2-0-21005 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.R3I9UYdjtn ++++ mktemp +++ local LAST_ERR=/tmp/tmp.7hSeDYMWtz +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-21005 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.R3I9UYdjtn +++ cat /tmp/tmp.7hSeDYMWtz command terminated with exit code 1 +++ sleep 0 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-21005 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list 
nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.R3I9UYdjtn +++ cat /tmp/tmp.7hSeDYMWtz command terminated with exit code 1 +++ sleep 4 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-21005 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.R3I9UYdjtn +++ cat /tmp/tmp.7hSeDYMWtz command terminated with exit code 1 +++ sleep 8 +++ cat /tmp/tmp.R3I9UYdjtn +++ cat /tmp/tmp.7hSeDYMWtz command terminated with exit code 1 +++ rm /tmp/tmp.R3I9UYdjtn /tmp/tmp.7hSeDYMWtz +++ return 1 ++ for node_id in '"${nodeList[@]}"' ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-0 -- pmm-admin --server-url=https://admin:admin@$(get_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ grep /node_id/943106e0-8f24-4ec5-bbb8-fe0614df8330 ++++ get_service_ip monitoring-service ++++ local service=monitoring-service ++++ local server_type=rs0 +++ awk '{print $4}' +++++ kubectl_bin get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' ++++++ mktemp +++++ local LAST_OUT=/tmp/tmp.LBQB4raZLe ++++++ mktemp +++++ local LAST_ERR=/tmp/tmp.1BgGder9QX +++++ local exit_status=0 +++++ local timeout=4 ++++++ seq 0 2 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.LBQB4raZLe +++++ cat /tmp/tmp.1BgGder9QX Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 0 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.LBQB4raZLe +++++ cat /tmp/tmp.1BgGder9QX Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 4 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.LBQB4raZLe +++++ cat /tmp/tmp.1BgGder9QX Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 8 +++++ cat /tmp/tmp.LBQB4raZLe +++++ cat /tmp/tmp.1BgGder9QX Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ rm /tmp/tmp.LBQB4raZLe /tmp/tmp.1BgGder9QX +++++ return 1 ++++ '[' '' '!=' true ']' ++++ echo -n monitoring-service.monitoring-service-rs0 ++++ return +++ kubectl_bin exec -n monitoring-2-0-21005 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.KBHNWhwWDS ++++ mktemp +++ local LAST_ERR=/tmp/tmp.xJ5sDArQnL +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-21005 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes 
--node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.KBHNWhwWDS +++ cat /tmp/tmp.xJ5sDArQnL command terminated with exit code 1 +++ sleep 0 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-21005 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.KBHNWhwWDS +++ cat /tmp/tmp.xJ5sDArQnL command terminated with exit code 1 +++ sleep 4 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-21005 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.KBHNWhwWDS +++ cat /tmp/tmp.xJ5sDArQnL command terminated with exit code 1 +++ sleep 8 +++ cat /tmp/tmp.KBHNWhwWDS +++ cat /tmp/tmp.xJ5sDArQnL command terminated with exit code 1 +++ rm /tmp/tmp.KBHNWhwWDS /tmp/tmp.xJ5sDArQnL +++ return 1 ++ for node_id in '"${nodeList[@]}"' ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-0 -- pmm-admin --server-url=https://admin:admin@$(get_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ awk '{print $4}' +++ grep /node_id/e2309140-709e-4393-b1e5-8d9311552425 ++++ get_service_ip monitoring-service ++++ local service=monitoring-service ++++ local server_type=rs0 +++++ kubectl_bin get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' ++++++ mktemp +++++ local LAST_OUT=/tmp/tmp.KoZiPOCaGh ++++++ mktemp +++++ local LAST_ERR=/tmp/tmp.1w4WaAGeej +++++ local exit_status=0 +++++ local timeout=4 ++++++ seq 0 2 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.KoZiPOCaGh +++++ cat /tmp/tmp.1w4WaAGeej Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 0 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.KoZiPOCaGh +++++ cat /tmp/tmp.1w4WaAGeej Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 4 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.KoZiPOCaGh +++++ cat /tmp/tmp.1w4WaAGeej Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 8 +++++ cat /tmp/tmp.KoZiPOCaGh +++++ cat /tmp/tmp.1w4WaAGeej Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ rm /tmp/tmp.KoZiPOCaGh /tmp/tmp.1w4WaAGeej +++++ return 1 ++++ '[' '' '!=' true ']' ++++ echo -n monitoring-service.monitoring-service-rs0 ++++ return +++ kubectl_bin exec -n monitoring-2-0-21005 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes 
--node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.EGmgestm1m ++++ mktemp +++ local LAST_ERR=/tmp/tmp.G5zfy1e9vO +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-21005 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.EGmgestm1m +++ cat /tmp/tmp.G5zfy1e9vO command terminated with exit code 1 +++ sleep 0 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-21005 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.EGmgestm1m +++ cat /tmp/tmp.G5zfy1e9vO command terminated with exit code 1 +++ sleep 4 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-21005 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.EGmgestm1m +++ cat /tmp/tmp.G5zfy1e9vO command terminated with exit code 1 +++ sleep 8 +++ cat /tmp/tmp.EGmgestm1m +++ cat /tmp/tmp.G5zfy1e9vO command terminated with exit code 1 +++ rm /tmp/tmp.EGmgestm1m /tmp/tmp.G5zfy1e9vO +++ return 1 ++ for node_id in '"${nodeList[@]}"' ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-0 -- pmm-admin --server-url=https://admin:admin@$(get_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ grep /node_id/542f6cc6-db70-4f18-a4d1-4497f8ae5afe +++ awk '{print $4}' ++++ get_service_ip monitoring-service ++++ local service=monitoring-service ++++ local server_type=rs0 +++++ kubectl_bin get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' ++++++ mktemp +++++ local LAST_OUT=/tmp/tmp.xm41Jc54SU ++++++ mktemp +++++ local LAST_ERR=/tmp/tmp.ETsu3AV7Fk +++++ local exit_status=0 +++++ local timeout=4 ++++++ seq 0 2 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.xm41Jc54SU +++++ cat /tmp/tmp.ETsu3AV7Fk Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 0 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.xm41Jc54SU +++++ cat /tmp/tmp.ETsu3AV7Fk Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 4 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.xm41Jc54SU +++++ cat /tmp/tmp.ETsu3AV7Fk Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 8 +++++ cat /tmp/tmp.xm41Jc54SU +++++ cat /tmp/tmp.ETsu3AV7Fk Error from server (NotFound): 
perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ rm /tmp/tmp.xm41Jc54SU /tmp/tmp.ETsu3AV7Fk +++++ return 1 ++++ '[' '' '!=' true ']' ++++ echo -n monitoring-service.monitoring-service-rs0 ++++ return +++ kubectl_bin exec -n monitoring-2-0-21005 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.q29kq7mBwx ++++ mktemp +++ local LAST_ERR=/tmp/tmp.TRP1QQHTIs +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-21005 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.q29kq7mBwx +++ cat /tmp/tmp.TRP1QQHTIs command terminated with exit code 1 +++ sleep 0 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-21005 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.q29kq7mBwx +++ cat /tmp/tmp.TRP1QQHTIs command terminated with exit code 1 +++ sleep 4 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-21005 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.q29kq7mBwx +++ cat /tmp/tmp.TRP1QQHTIs command terminated with exit code 1 +++ sleep 8 +++ cat /tmp/tmp.q29kq7mBwx +++ cat /tmp/tmp.TRP1QQHTIs command terminated with exit code 1 +++ rm /tmp/tmp.q29kq7mBwx /tmp/tmp.TRP1QQHTIs +++ return 1 ++ for node_id in '"${nodeList[@]}"' ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-0 -- pmm-admin --server-url=https://admin:admin@$(get_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ grep /node_id/cb3024b4-1e3e-4258-9894-04253cdc03c5 +++ awk '{print $4}' ++++ get_service_ip monitoring-service ++++ local service=monitoring-service ++++ local server_type=rs0 +++++ kubectl_bin get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' ++++++ mktemp +++++ local LAST_OUT=/tmp/tmp.vZhBZMhvZi ++++++ mktemp +++++ local LAST_ERR=/tmp/tmp.YxdpRTc4IS +++++ local exit_status=0 +++++ local timeout=4 ++++++ seq 0 2 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.vZhBZMhvZi +++++ cat /tmp/tmp.YxdpRTc4IS Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 0 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.vZhBZMhvZi +++++ cat /tmp/tmp.YxdpRTc4IS Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 4 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get 
psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.vZhBZMhvZi +++++ cat /tmp/tmp.YxdpRTc4IS Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 8 +++++ cat /tmp/tmp.vZhBZMhvZi +++++ cat /tmp/tmp.YxdpRTc4IS Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ rm /tmp/tmp.vZhBZMhvZi /tmp/tmp.YxdpRTc4IS +++++ return 1 ++++ '[' '' '!=' true ']' ++++ echo -n monitoring-service.monitoring-service-rs0 ++++ return +++ kubectl_bin exec -n monitoring-2-0-21005 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.HPzWzx6mBC ++++ mktemp +++ local LAST_ERR=/tmp/tmp.suoJ4FQYBV +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-21005 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.HPzWzx6mBC +++ cat /tmp/tmp.suoJ4FQYBV command terminated with exit code 1 +++ sleep 0 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-21005 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.HPzWzx6mBC +++ cat /tmp/tmp.suoJ4FQYBV command terminated with exit code 1 +++ sleep 4 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-21005 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.HPzWzx6mBC +++ cat /tmp/tmp.suoJ4FQYBV command terminated with exit code 1 +++ sleep 8 +++ cat /tmp/tmp.HPzWzx6mBC +++ cat /tmp/tmp.suoJ4FQYBV command terminated with exit code 1 +++ rm /tmp/tmp.HPzWzx6mBC /tmp/tmp.suoJ4FQYBV +++ return 1 ++ for node_id in '"${nodeList[@]}"' ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-0 -- pmm-admin --server-url=https://admin:admin@$(get_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ grep /node_id/35f4348b-4bb2-4d0b-99c0-b961756c4b93 +++ awk '{print $4}' ++++ get_service_ip monitoring-service ++++ local service=monitoring-service ++++ local server_type=rs0 +++++ kubectl_bin get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' ++++++ mktemp +++++ local LAST_OUT=/tmp/tmp.G599ffXSLX ++++++ mktemp +++++ local LAST_ERR=/tmp/tmp.lkgOWkau2C +++++ local exit_status=0 +++++ local timeout=4 ++++++ seq 0 2 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.G599ffXSLX +++++ cat /tmp/tmp.lkgOWkau2C Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 0 +++++ for i in '$(seq 0 2)' +++++ set +e 
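For reference, every retry block in this trace is an expansion of the suite's kubectl_bin wrapper. A minimal sketch reconstructed from the visible expansions (three attempts via seq 0 2, stdout/stderr captured to mktemp files, linear backoff of timeout*i seconds — the sleep 0 / sleep 4 / sleep 8 seen above); the real helper may carry extra checks not visible here:

kubectl_bin() {
	local LAST_OUT LAST_ERR exit_status=0 timeout=4
	LAST_OUT=$(mktemp)
	LAST_ERR=$(mktemp)
	for i in $(seq 0 2); do
		set +e
		kubectl "$@" >"$LAST_OUT" 2>"$LAST_ERR"
		exit_status=$?
		set -e
		if [ "$exit_status" != 0 ]; then
			# failed attempt: surface the captured output, back off, try again
			cat "$LAST_OUT"
			cat "$LAST_ERR"
			sleep $((timeout * i))
		else
			break
		fi
	done
	cat "$LAST_OUT"
	cat "$LAST_ERR"
	rm "$LAST_OUT" "$LAST_ERR"
	return $exit_status
}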
+++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.G599ffXSLX +++++ cat /tmp/tmp.lkgOWkau2C Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 4 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.G599ffXSLX +++++ cat /tmp/tmp.lkgOWkau2C Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 8 +++++ cat /tmp/tmp.G599ffXSLX +++++ cat /tmp/tmp.lkgOWkau2C Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ rm /tmp/tmp.G599ffXSLX /tmp/tmp.lkgOWkau2C +++++ return 1 ++++ '[' '' '!=' true ']' ++++ echo -n monitoring-service.monitoring-service-rs0 ++++ return +++ kubectl_bin exec -n monitoring-2-0-21005 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.3iyGDeMCiV ++++ mktemp +++ local LAST_ERR=/tmp/tmp.PklKC3b1SB +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-21005 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.3iyGDeMCiV +++ cat /tmp/tmp.PklKC3b1SB command terminated with exit code 1 +++ sleep 0 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-21005 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.3iyGDeMCiV +++ cat /tmp/tmp.PklKC3b1SB command terminated with exit code 1 +++ sleep 4 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-21005 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.3iyGDeMCiV +++ cat /tmp/tmp.PklKC3b1SB command terminated with exit code 1 +++ sleep 8 +++ cat /tmp/tmp.3iyGDeMCiV +++ cat /tmp/tmp.PklKC3b1SB command terminated with exit code 1 +++ rm /tmp/tmp.3iyGDeMCiV /tmp/tmp.PklKC3b1SB +++ return 1 ++ for node_id in '"${nodeList[@]}"' ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-0 -- pmm-admin --server-url=https://admin:admin@$(get_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ grep /node_id/699d53f7-537c-4b12-b8e0-35c417632209 +++ awk '{print $4}' ++++ get_service_ip monitoring-service ++++ local service=monitoring-service ++++ local server_type=rs0 +++++ kubectl_bin get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' ++++++ mktemp +++++ local LAST_OUT=/tmp/tmp.SVUoWDZ9Tk ++++++ mktemp +++++ local LAST_ERR=/tmp/tmp.Kt5Lw5dLUC +++++ local exit_status=0 +++++ local timeout=4 ++++++ seq 0 2 +++++ for i in '$(seq 
0 2)' +++++ set +e +++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.SVUoWDZ9Tk +++++ cat /tmp/tmp.Kt5Lw5dLUC Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 0 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.SVUoWDZ9Tk +++++ cat /tmp/tmp.Kt5Lw5dLUC Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 4 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.SVUoWDZ9Tk +++++ cat /tmp/tmp.Kt5Lw5dLUC Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 8 +++++ cat /tmp/tmp.SVUoWDZ9Tk +++++ cat /tmp/tmp.Kt5Lw5dLUC Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ rm /tmp/tmp.SVUoWDZ9Tk /tmp/tmp.Kt5Lw5dLUC +++++ return 1 ++++ '[' '' '!=' true ']' ++++ echo -n monitoring-service.monitoring-service-rs0 ++++ return +++ kubectl_bin exec -n monitoring-2-0-21005 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.TFGMNyoqZG ++++ mktemp +++ local LAST_ERR=/tmp/tmp.LfAGsxVGTu +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-21005 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.TFGMNyoqZG +++ cat /tmp/tmp.LfAGsxVGTu command terminated with exit code 1 +++ sleep 0 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-21005 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.TFGMNyoqZG +++ cat /tmp/tmp.LfAGsxVGTu command terminated with exit code 1 +++ sleep 4 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-21005 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.TFGMNyoqZG +++ cat /tmp/tmp.LfAGsxVGTu command terminated with exit code 1 +++ sleep 8 +++ cat /tmp/tmp.TFGMNyoqZG +++ cat /tmp/tmp.LfAGsxVGTu command terminated with exit code 1 +++ rm /tmp/tmp.TFGMNyoqZG /tmp/tmp.LfAGsxVGTu +++ return 1 ++ for node_id in '"${nodeList[@]}"' ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-0 -- pmm-admin --server-url=https://admin:admin@$(get_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ grep /node_id/955fc6e2-f087-4713-8f3c-047be05bf7e8 ++++ get_service_ip 
monitoring-service ++++ local service=monitoring-service +++ awk '{print $4}' ++++ local server_type=rs0 +++++ kubectl_bin get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' ++++++ mktemp +++++ local LAST_OUT=/tmp/tmp.Bukv7BfogA ++++++ mktemp +++++ local LAST_ERR=/tmp/tmp.v42M5f6qmf +++++ local exit_status=0 +++++ local timeout=4 ++++++ seq 0 2 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.Bukv7BfogA +++++ cat /tmp/tmp.v42M5f6qmf Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 0 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.Bukv7BfogA +++++ cat /tmp/tmp.v42M5f6qmf Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 4 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.Bukv7BfogA +++++ cat /tmp/tmp.v42M5f6qmf Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 8 +++++ cat /tmp/tmp.Bukv7BfogA +++++ cat /tmp/tmp.v42M5f6qmf Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ rm /tmp/tmp.Bukv7BfogA /tmp/tmp.v42M5f6qmf +++++ return 1 ++++ '[' '' '!=' true ']' ++++ echo -n monitoring-service.monitoring-service-rs0 ++++ return +++ kubectl_bin exec -n monitoring-2-0-21005 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.x5O3anrByu ++++ mktemp +++ local LAST_ERR=/tmp/tmp.lRtfExg6MB +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-21005 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.x5O3anrByu +++ cat /tmp/tmp.lRtfExg6MB command terminated with exit code 1 +++ sleep 0 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-21005 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.x5O3anrByu +++ cat /tmp/tmp.lRtfExg6MB command terminated with exit code 1 +++ sleep 4 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-21005 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.x5O3anrByu +++ cat /tmp/tmp.lRtfExg6MB command terminated with exit code 1 +++ sleep 8 +++ cat /tmp/tmp.x5O3anrByu +++ cat /tmp/tmp.lRtfExg6MB command terminated with exit code 1 +++ rm /tmp/tmp.x5O3anrByu /tmp/tmp.lRtfExg6MB 
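The address every pmm-admin call above keeps failing against comes from get_service_ip. A sketch reconstructed from the trace (the exposed branch is assumed, since this run never reaches it): because psmdb/monitoring-service was already deleted, the jsonpath lookup returns nothing after its retries, '' != true, and the helper falls back to the cluster-DNS name monitoring-service.monitoring-service-rs0 — which no longer resolves, hence every "command terminated with exit code 1":

get_service_ip() {
	local service=$1
	local server_type=${2:-rs0}
	if [ "$(kubectl_bin get "psmdb/${service}" -o 'jsonpath={.spec.replsets[].expose.enabled}')" != 'true' ]; then
		# replset not exposed (or, as in this run, the CR is already gone):
		# fall back to the headless-service DNS name
		echo -n "${service}.${service}-${server_type}"
		return
	fi
	# exposed branch (assumed shape; not exercised in this trace):
	# would resolve the LoadBalancer ingress address of the exposed service
	kubectl_bin get "service/${service}" -o 'jsonpath={.status.loadBalancer.ingress[].ip}'
}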
+++ return 1 ++ for node_id in '"${nodeList[@]}"' ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-0 -- pmm-admin --server-url=https://admin:admin@$(get_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) ++++ get_service_ip monitoring-service ++++ local service=monitoring-service ++++ local server_type=rs0 +++++ kubectl_bin get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' ++++++ mktemp +++++ local LAST_OUT=/tmp/tmp.5Z8zGESYWH +++ grep /node_id/4a4b9313-e33b-4cc8-bd79-a597d67a1164 +++ awk '{print $4}' ++++++ mktemp +++++ local LAST_ERR=/tmp/tmp.LaEyK7M8iX +++++ local exit_status=0 +++++ local timeout=4 ++++++ seq 0 2 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.5Z8zGESYWH +++++ cat /tmp/tmp.LaEyK7M8iX Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 0 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.5Z8zGESYWH +++++ cat /tmp/tmp.LaEyK7M8iX Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 4 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.5Z8zGESYWH +++++ cat /tmp/tmp.LaEyK7M8iX Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 8 +++++ cat /tmp/tmp.5Z8zGESYWH +++++ cat /tmp/tmp.LaEyK7M8iX Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ rm /tmp/tmp.5Z8zGESYWH /tmp/tmp.LaEyK7M8iX +++++ return 1 ++++ '[' '' '!=' true ']' ++++ echo -n monitoring-service.monitoring-service-rs0 ++++ return +++ kubectl_bin exec -n monitoring-2-0-21005 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.BFQ0MRv0XF ++++ mktemp +++ local LAST_ERR=/tmp/tmp.ncxxzMgTLF +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-21005 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.BFQ0MRv0XF +++ cat /tmp/tmp.ncxxzMgTLF command terminated with exit code 1 +++ sleep 0 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-21005 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.BFQ0MRv0XF +++ cat /tmp/tmp.ncxxzMgTLF command terminated with exit code 1 +++ sleep 4 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-21005 monitoring-0 -- pmm-admin 
--server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.BFQ0MRv0XF +++ cat /tmp/tmp.ncxxzMgTLF command terminated with exit code 1 +++ sleep 8 +++ cat /tmp/tmp.BFQ0MRv0XF +++ cat /tmp/tmp.ncxxzMgTLF command terminated with exit code 1 +++ rm /tmp/tmp.BFQ0MRv0XF /tmp/tmp.ncxxzMgTLF +++ return 1 ++ echo + kubectl_bin patch psmdb monitoring --type json '-p=[{"op":"add","path":"/spec/pause","value":true}]' ++ mktemp + local LAST_OUT=/tmp/tmp.hdBHZ7eMZx ++ mktemp + local LAST_ERR=/tmp/tmp.c7FyES95aW + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl patch psmdb monitoring --type json '-p=[{"op":"add","path":"/spec/pause","value":true}]' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.hdBHZ7eMZx perconaservermongodb.psmdb.percona.com/monitoring patched + cat /tmp/tmp.c7FyES95aW + rm /tmp/tmp.hdBHZ7eMZx /tmp/tmp.c7FyES95aW + return 0 + wait_for_delete pod/monitoring-mongos-0 + local res=pod/monitoring-mongos-0 + local wait_time=60 + set +o xtrace pod/monitoring-mongos-0 - .......................Error from server (NotFound): pods "monitoring-mongos-0" not found Error from server (NotFound): pods "monitoring-mongos-0" not found Error from server (NotFound): pods "monitoring-mongos-0" not found Error from server (NotFound): pods "monitoring-mongos-0" not found + wait_for_delete pod/monitoring-rs0-0 + local res=pod/monitoring-rs0-0 + local wait_time=60 + set +o xtrace pod/monitoring-rs0-0 - ...........Error from server (NotFound): pods "monitoring-rs0-0" not found Error from server (NotFound): pods "monitoring-rs0-0" not found Error from server (NotFound): pods "monitoring-rs0-0" not found Error from server (NotFound): pods "monitoring-rs0-0" not found + wait_for_delete pod/monitoring-cfg-0 + local res=pod/monitoring-cfg-0 + local wait_time=60 + set +o xtrace pod/monitoring-cfg-0 - ...Error from server (NotFound): pods "monitoring-cfg-0" not found Error from server (NotFound): pods "monitoring-cfg-0" not found Error from server (NotFound): pods "monitoring-cfg-0" not found Error from server (NotFound): pods "monitoring-cfg-0" not found + desc 'check if services are not deleted' + set +o xtrace ----------------------------------------------------------------------------------- check if services are not deleted ----------------------------------------------------------------------------------- + kubectl_bin get svc monitoring-rs0 ++ mktemp + local LAST_OUT=/tmp/tmp.zVvqw5Dv5t ++ mktemp + local LAST_ERR=/tmp/tmp.Uu2UWb1upY + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get svc monitoring-rs0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.zVvqw5Dv5t NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE monitoring-rs0 ClusterIP None 27017/TCP 17m + cat /tmp/tmp.Uu2UWb1upY + rm /tmp/tmp.zVvqw5Dv5t /tmp/tmp.Uu2UWb1upY + return 0 + kubectl_bin get svc monitoring-cfg ++ mktemp + local LAST_OUT=/tmp/tmp.NmBKrF6F0c ++ mktemp + local LAST_ERR=/tmp/tmp.KeM4fqdbeF + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get svc monitoring-cfg + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.NmBKrF6F0c NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE monitoring-cfg ClusterIP None 27017/TCP 17m + cat /tmp/tmp.KeM4fqdbeF + rm /tmp/tmp.NmBKrF6F0c 
/tmp/tmp.KeM4fqdbeF + return 0 + kubectl_bin get svc monitoring-mongos ++ mktemp + local LAST_OUT=/tmp/tmp.kiUmvJe299 ++ mktemp + local LAST_ERR=/tmp/tmp.rUmDkHXSlo + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get svc monitoring-mongos + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.kiUmvJe299 NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE monitoring-mongos ClusterIP 10.162.25.188 27017/TCP 16m + cat /tmp/tmp.rUmDkHXSlo + rm /tmp/tmp.kiUmvJe299 /tmp/tmp.rUmDkHXSlo + return 0 + does_node_id_exists_in_pmm=($(does_node_id_exists "${nodeList[@]}")) ++ does_node_id_exists /node_id/214467b1-ca7d-435c-b172-b834ffc6fc80 /node_id/943106e0-8f24-4ec5-bbb8-fe0614df8330 /node_id/e2309140-709e-4393-b1e5-8d9311552425 /node_id/542f6cc6-db70-4f18-a4d1-4497f8ae5afe /node_id/cb3024b4-1e3e-4258-9894-04253cdc03c5 /node_id/35f4348b-4bb2-4d0b-99c0-b961756c4b93 /node_id/699d53f7-537c-4b12-b8e0-35c417632209 /node_id/955fc6e2-f087-4713-8f3c-047be05bf7e8 /node_id/4a4b9313-e33b-4cc8-bd79-a597d67a1164 ++ nodeList=("$@") ++ local -a nodeList ++ nodeList_from_pmm=() ++ local -a nodeList_from_pmm ++ for node_id in '"${nodeList[@]}"' ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-0 -- pmm-admin --server-url=https://admin:admin@$(get_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ grep /node_id/214467b1-ca7d-435c-b172-b834ffc6fc80 +++ awk '{print $4}' ++++ get_service_ip monitoring-service ++++ local service=monitoring-service ++++ local server_type=rs0 +++++ kubectl_bin get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' ++++++ mktemp +++++ local LAST_OUT=/tmp/tmp.ElztUyptqO ++++++ mktemp +++++ local LAST_ERR=/tmp/tmp.98RkOz2Wpm +++++ local exit_status=0 +++++ local timeout=4 ++++++ seq 0 2 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.ElztUyptqO +++++ cat /tmp/tmp.98RkOz2Wpm Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 0 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.ElztUyptqO +++++ cat /tmp/tmp.98RkOz2Wpm Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 4 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.ElztUyptqO +++++ cat /tmp/tmp.98RkOz2Wpm Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 8 +++++ cat /tmp/tmp.ElztUyptqO +++++ cat /tmp/tmp.98RkOz2Wpm Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ rm /tmp/tmp.ElztUyptqO /tmp/tmp.98RkOz2Wpm +++++ return 1 ++++ '[' '' '!=' true ']' ++++ echo -n monitoring-service.monitoring-service-rs0 ++++ return +++ kubectl_bin exec -n monitoring-2-0-21005 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE 
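The loop being replayed here for each of the nine node IDs is does_node_id_exists, taken almost verbatim from the trace: it asks PMM's inventory for CONTAINER_NODE entries, greps for each expected /node_id/<uuid>, and echoes whatever matched (column 4 of the inventory table is assumed to hold the node ID, per the awk '{print $4}' above):

does_node_id_exists() {
	local -a nodeList=("$@")
	local -a nodeList_from_pmm=()
	for node_id in "${nodeList[@]}"; do
		nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-0 -- \
			pmm-admin --server-url=https://admin:admin@$(get_service_ip monitoring-service)/ \
			--server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE \
			| grep $node_id | awk '{print $4}'))
	done
	echo "${nodeList_from_pmm[@]}"
}

Because kubectl_bin returns 1 on every attempt, each pipeline yields nothing, nodeList_from_pmm stays empty, and the later [[ -n '' ]] check passes: the test treats "no matching nodes left in PMM inventory" as the expected state once the cluster is paused.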
++++ mktemp +++ local LAST_OUT=/tmp/tmp.Vkslb5tyW9 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.2gC63xViRm +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-21005 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.Vkslb5tyW9 +++ cat /tmp/tmp.2gC63xViRm command terminated with exit code 1 +++ sleep 0 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-21005 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.Vkslb5tyW9 +++ cat /tmp/tmp.2gC63xViRm command terminated with exit code 1 +++ sleep 4 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-21005 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.Vkslb5tyW9 +++ cat /tmp/tmp.2gC63xViRm command terminated with exit code 1 +++ sleep 8 +++ cat /tmp/tmp.Vkslb5tyW9 +++ cat /tmp/tmp.2gC63xViRm command terminated with exit code 1 +++ rm /tmp/tmp.Vkslb5tyW9 /tmp/tmp.2gC63xViRm +++ return 1 ++ for node_id in '"${nodeList[@]}"' ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-0 -- pmm-admin --server-url=https://admin:admin@$(get_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) ++++ get_service_ip monitoring-service ++++ local service=monitoring-service ++++ local server_type=rs0 +++++ kubectl_bin get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++ grep /node_id/943106e0-8f24-4ec5-bbb8-fe0614df8330 +++ awk '{print $4}' ++++++ mktemp +++++ local LAST_OUT=/tmp/tmp.kloVMilXSN ++++++ mktemp +++++ local LAST_ERR=/tmp/tmp.6UKAKOCBfz +++++ local exit_status=0 +++++ local timeout=4 ++++++ seq 0 2 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.kloVMilXSN +++++ cat /tmp/tmp.6UKAKOCBfz Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 0 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.kloVMilXSN +++++ cat /tmp/tmp.6UKAKOCBfz Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 4 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.kloVMilXSN +++++ cat /tmp/tmp.6UKAKOCBfz Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 8 +++++ cat /tmp/tmp.kloVMilXSN +++++ cat /tmp/tmp.6UKAKOCBfz Error from server (NotFound): perconaservermongodbs.psmdb.percona.com 
"monitoring-service" not found +++++ rm /tmp/tmp.kloVMilXSN /tmp/tmp.6UKAKOCBfz +++++ return 1 ++++ '[' '' '!=' true ']' ++++ echo -n monitoring-service.monitoring-service-rs0 ++++ return +++ kubectl_bin exec -n monitoring-2-0-21005 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.F7XLnpQ0PV ++++ mktemp +++ local LAST_ERR=/tmp/tmp.gHxvYSRRJJ +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-21005 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.F7XLnpQ0PV +++ cat /tmp/tmp.gHxvYSRRJJ command terminated with exit code 1 +++ sleep 0 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-21005 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.F7XLnpQ0PV +++ cat /tmp/tmp.gHxvYSRRJJ command terminated with exit code 1 +++ sleep 4 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-21005 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.F7XLnpQ0PV +++ cat /tmp/tmp.gHxvYSRRJJ command terminated with exit code 1 +++ sleep 8 +++ cat /tmp/tmp.F7XLnpQ0PV +++ cat /tmp/tmp.gHxvYSRRJJ command terminated with exit code 1 +++ rm /tmp/tmp.F7XLnpQ0PV /tmp/tmp.gHxvYSRRJJ +++ return 1 ++ for node_id in '"${nodeList[@]}"' ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-0 -- pmm-admin --server-url=https://admin:admin@$(get_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ awk '{print $4}' +++ grep /node_id/e2309140-709e-4393-b1e5-8d9311552425 ++++ get_service_ip monitoring-service ++++ local service=monitoring-service ++++ local server_type=rs0 +++++ kubectl_bin get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' ++++++ mktemp +++++ local LAST_OUT=/tmp/tmp.PvXmBXnbrs ++++++ mktemp +++++ local LAST_ERR=/tmp/tmp.M2mOwU7ZIr +++++ local exit_status=0 +++++ local timeout=4 ++++++ seq 0 2 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.PvXmBXnbrs +++++ cat /tmp/tmp.M2mOwU7ZIr Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 0 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.PvXmBXnbrs +++++ cat /tmp/tmp.M2mOwU7ZIr Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 4 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get psmdb/monitoring-service -o 
'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.PvXmBXnbrs +++++ cat /tmp/tmp.M2mOwU7ZIr Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 8 +++++ cat /tmp/tmp.PvXmBXnbrs +++++ cat /tmp/tmp.M2mOwU7ZIr Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ rm /tmp/tmp.PvXmBXnbrs /tmp/tmp.M2mOwU7ZIr +++++ return 1 ++++ '[' '' '!=' true ']' ++++ echo -n monitoring-service.monitoring-service-rs0 ++++ return +++ kubectl_bin exec -n monitoring-2-0-21005 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.ooUTOxwBR9 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.naT2biJGsN +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-21005 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.ooUTOxwBR9 +++ cat /tmp/tmp.naT2biJGsN command terminated with exit code 1 +++ sleep 0 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-21005 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.ooUTOxwBR9 +++ cat /tmp/tmp.naT2biJGsN command terminated with exit code 1 +++ sleep 4 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-21005 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.ooUTOxwBR9 +++ cat /tmp/tmp.naT2biJGsN command terminated with exit code 1 +++ sleep 8 +++ cat /tmp/tmp.ooUTOxwBR9 +++ cat /tmp/tmp.naT2biJGsN command terminated with exit code 1 +++ rm /tmp/tmp.ooUTOxwBR9 /tmp/tmp.naT2biJGsN +++ return 1 ++ for node_id in '"${nodeList[@]}"' ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-0 -- pmm-admin --server-url=https://admin:admin@$(get_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ grep /node_id/542f6cc6-db70-4f18-a4d1-4497f8ae5afe +++ awk '{print $4}' ++++ get_service_ip monitoring-service ++++ local service=monitoring-service ++++ local server_type=rs0 +++++ kubectl_bin get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' ++++++ mktemp +++++ local LAST_OUT=/tmp/tmp.rXOrjS8sIt ++++++ mktemp +++++ local LAST_ERR=/tmp/tmp.j5wLPk9GK8 +++++ local exit_status=0 +++++ local timeout=4 ++++++ seq 0 2 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.rXOrjS8sIt +++++ cat /tmp/tmp.j5wLPk9GK8 Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 0 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get 
psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.rXOrjS8sIt +++++ cat /tmp/tmp.j5wLPk9GK8 Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 4 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.rXOrjS8sIt +++++ cat /tmp/tmp.j5wLPk9GK8 Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 8 +++++ cat /tmp/tmp.rXOrjS8sIt +++++ cat /tmp/tmp.j5wLPk9GK8 Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ rm /tmp/tmp.rXOrjS8sIt /tmp/tmp.j5wLPk9GK8 +++++ return 1 ++++ '[' '' '!=' true ']' ++++ echo -n monitoring-service.monitoring-service-rs0 ++++ return +++ kubectl_bin exec -n monitoring-2-0-21005 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.H0jtpPtoVN ++++ mktemp +++ local LAST_ERR=/tmp/tmp.oVgBSAGOD0 +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-21005 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.H0jtpPtoVN +++ cat /tmp/tmp.oVgBSAGOD0 command terminated with exit code 1 +++ sleep 0 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-21005 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.H0jtpPtoVN +++ cat /tmp/tmp.oVgBSAGOD0 command terminated with exit code 1 +++ sleep 4 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-21005 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.H0jtpPtoVN +++ cat /tmp/tmp.oVgBSAGOD0 command terminated with exit code 1 +++ sleep 8 +++ cat /tmp/tmp.H0jtpPtoVN +++ cat /tmp/tmp.oVgBSAGOD0 command terminated with exit code 1 +++ rm /tmp/tmp.H0jtpPtoVN /tmp/tmp.oVgBSAGOD0 +++ return 1 ++ for node_id in '"${nodeList[@]}"' ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-0 -- pmm-admin --server-url=https://admin:admin@$(get_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ grep /node_id/cb3024b4-1e3e-4258-9894-04253cdc03c5 +++ awk '{print $4}' ++++ get_service_ip monitoring-service ++++ local service=monitoring-service ++++ local server_type=rs0 +++++ kubectl_bin get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' ++++++ mktemp +++++ local LAST_OUT=/tmp/tmp.H9cylQxqBz ++++++ mktemp +++++ local LAST_ERR=/tmp/tmp.9V4JeFkRzG +++++ local exit_status=0 +++++ local timeout=4 ++++++ seq 0 2 +++++ for i in '$(seq 0 2)' +++++ set +e 
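For completeness, the wait_for_delete helper used earlier in this section to confirm monitoring-mongos-0, monitoring-rs0-0 and monitoring-cfg-0 were gone after the pause: only its arguments and the dotted progress output are visible in the trace (xtrace is switched off inside it), so the polling loop below is an assumed shape, not the suite's exact code:

wait_for_delete() {
	local res="$1"
	local wait_time="${2:-60}"  # default inferred from 'local wait_time=60'
	echo -n "$res - "
	local retry=0
	# poll until the API server reports NotFound (loop body assumed)
	while kubectl get "$res" >/dev/null 2>&1; do
		echo -n .
		retry=$((retry + 1))
		if [ "$retry" -ge "$wait_time" ]; then
			echo "timeout waiting for delete of $res"
			return 1
		fi
		sleep 1
	done
}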
+++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.H9cylQxqBz +++++ cat /tmp/tmp.9V4JeFkRzG Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 0 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.H9cylQxqBz +++++ cat /tmp/tmp.9V4JeFkRzG Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 4 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.H9cylQxqBz +++++ cat /tmp/tmp.9V4JeFkRzG Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 8 +++++ cat /tmp/tmp.H9cylQxqBz +++++ cat /tmp/tmp.9V4JeFkRzG Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ rm /tmp/tmp.H9cylQxqBz /tmp/tmp.9V4JeFkRzG +++++ return 1 ++++ '[' '' '!=' true ']' ++++ echo -n monitoring-service.monitoring-service-rs0 ++++ return +++ kubectl_bin exec -n monitoring-2-0-21005 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.S1S57hsiNz ++++ mktemp +++ local LAST_ERR=/tmp/tmp.BzI8XEpDoL +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-21005 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.S1S57hsiNz +++ cat /tmp/tmp.BzI8XEpDoL command terminated with exit code 1 +++ sleep 0 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-21005 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.S1S57hsiNz +++ cat /tmp/tmp.BzI8XEpDoL command terminated with exit code 1 +++ sleep 4 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-21005 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.S1S57hsiNz +++ cat /tmp/tmp.BzI8XEpDoL command terminated with exit code 1 +++ sleep 8 +++ cat /tmp/tmp.S1S57hsiNz +++ cat /tmp/tmp.BzI8XEpDoL command terminated with exit code 1 +++ rm /tmp/tmp.S1S57hsiNz /tmp/tmp.BzI8XEpDoL +++ return 1 ++ for node_id in '"${nodeList[@]}"' ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-0 -- pmm-admin --server-url=https://admin:admin@$(get_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ grep /node_id/35f4348b-4bb2-4d0b-99c0-b961756c4b93 +++ awk '{print $4}' ++++ get_service_ip 
monitoring-service ++++ local service=monitoring-service ++++ local server_type=rs0 +++++ kubectl_bin get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' ++++++ mktemp +++++ local LAST_OUT=/tmp/tmp.ggx0izyl42 ++++++ mktemp +++++ local LAST_ERR=/tmp/tmp.esMCURG11E +++++ local exit_status=0 +++++ local timeout=4 ++++++ seq 0 2 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.ggx0izyl42 +++++ cat /tmp/tmp.esMCURG11E Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 0 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.ggx0izyl42 +++++ cat /tmp/tmp.esMCURG11E Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 4 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.ggx0izyl42 +++++ cat /tmp/tmp.esMCURG11E Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 8 +++++ cat /tmp/tmp.ggx0izyl42 +++++ cat /tmp/tmp.esMCURG11E Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ rm /tmp/tmp.ggx0izyl42 /tmp/tmp.esMCURG11E +++++ return 1 ++++ '[' '' '!=' true ']' ++++ echo -n monitoring-service.monitoring-service-rs0 ++++ return +++ kubectl_bin exec -n monitoring-2-0-21005 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.KTJtIyZA7u ++++ mktemp +++ local LAST_ERR=/tmp/tmp.EpUz531eSV +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-21005 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.KTJtIyZA7u +++ cat /tmp/tmp.EpUz531eSV command terminated with exit code 1 +++ sleep 0 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-21005 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.KTJtIyZA7u +++ cat /tmp/tmp.EpUz531eSV command terminated with exit code 1 +++ sleep 4 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-21005 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.KTJtIyZA7u +++ cat /tmp/tmp.EpUz531eSV command terminated with exit code 1 +++ sleep 8 +++ cat /tmp/tmp.KTJtIyZA7u +++ cat /tmp/tmp.EpUz531eSV command terminated with exit code 1 +++ rm /tmp/tmp.KTJtIyZA7u /tmp/tmp.EpUz531eSV +++ return 1 ++ for 
node_id in '"${nodeList[@]}"' ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-0 -- pmm-admin --server-url=https://admin:admin@$(get_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) ++++ get_service_ip monitoring-service ++++ local service=monitoring-service ++++ local server_type=rs0 +++++ kubectl_bin get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' ++++++ mktemp +++++ local LAST_OUT=/tmp/tmp.LcQRXg8NcJ ++++++ mktemp +++++ local LAST_ERR=/tmp/tmp.fIdnQKvrNA +++++ local exit_status=0 +++++ local timeout=4 ++++++ seq 0 2 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++ grep /node_id/699d53f7-537c-4b12-b8e0-35c417632209 +++ awk '{print $4}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.LcQRXg8NcJ +++++ cat /tmp/tmp.fIdnQKvrNA Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 0 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.LcQRXg8NcJ +++++ cat /tmp/tmp.fIdnQKvrNA Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 4 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.LcQRXg8NcJ +++++ cat /tmp/tmp.fIdnQKvrNA Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 8 +++++ cat /tmp/tmp.LcQRXg8NcJ +++++ cat /tmp/tmp.fIdnQKvrNA Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ rm /tmp/tmp.LcQRXg8NcJ /tmp/tmp.fIdnQKvrNA +++++ return 1 ++++ '[' '' '!=' true ']' ++++ echo -n monitoring-service.monitoring-service-rs0 ++++ return +++ kubectl_bin exec -n monitoring-2-0-21005 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.VDY2OFCHqm ++++ mktemp +++ local LAST_ERR=/tmp/tmp.Pa9DLnq3HG +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-21005 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.VDY2OFCHqm +++ cat /tmp/tmp.Pa9DLnq3HG command terminated with exit code 1 +++ sleep 0 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-21005 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.VDY2OFCHqm +++ cat /tmp/tmp.Pa9DLnq3HG command terminated with exit code 1 +++ sleep 4 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-21005 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ 
--server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.VDY2OFCHqm +++ cat /tmp/tmp.Pa9DLnq3HG command terminated with exit code 1 +++ sleep 8 +++ cat /tmp/tmp.VDY2OFCHqm +++ cat /tmp/tmp.Pa9DLnq3HG command terminated with exit code 1 +++ rm /tmp/tmp.VDY2OFCHqm /tmp/tmp.Pa9DLnq3HG +++ return 1 ++ for node_id in '"${nodeList[@]}"' ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-0 -- pmm-admin --server-url=https://admin:admin@$(get_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ grep /node_id/955fc6e2-f087-4713-8f3c-047be05bf7e8 +++ awk '{print $4}' ++++ get_service_ip monitoring-service ++++ local service=monitoring-service ++++ local server_type=rs0 +++++ kubectl_bin get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' ++++++ mktemp +++++ local LAST_OUT=/tmp/tmp.WLJuPrV0RW ++++++ mktemp +++++ local LAST_ERR=/tmp/tmp.9ht7atrTxi +++++ local exit_status=0 +++++ local timeout=4 ++++++ seq 0 2 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.WLJuPrV0RW +++++ cat /tmp/tmp.9ht7atrTxi Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 0 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.WLJuPrV0RW +++++ cat /tmp/tmp.9ht7atrTxi Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 4 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.WLJuPrV0RW +++++ cat /tmp/tmp.9ht7atrTxi Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 8 +++++ cat /tmp/tmp.WLJuPrV0RW +++++ cat /tmp/tmp.9ht7atrTxi Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ rm /tmp/tmp.WLJuPrV0RW /tmp/tmp.9ht7atrTxi +++++ return 1 ++++ '[' '' '!=' true ']' ++++ echo -n monitoring-service.monitoring-service-rs0 ++++ return +++ kubectl_bin exec -n monitoring-2-0-21005 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.kIsUW1d2Zs ++++ mktemp +++ local LAST_ERR=/tmp/tmp.TZ5MH3u5pP +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-21005 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.kIsUW1d2Zs +++ cat /tmp/tmp.TZ5MH3u5pP command terminated with exit code 1 +++ sleep 0 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-21005 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls 
+++ kubectl exec -n monitoring-2-0-21005 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE
+++ exit_status=1
+++ set -e
+++ '[' 1 '!=' 0 -a -n 1 ']'
+++ cat /tmp/tmp.kIsUW1d2Zs
+++ cat /tmp/tmp.TZ5MH3u5pP
command terminated with exit code 1
+++ sleep 4
+++ for i in '$(seq 0 2)'
+++ set +e
+++ kubectl exec -n monitoring-2-0-21005 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE
+++ exit_status=1
+++ set -e
+++ '[' 1 '!=' 0 -a -n 1 ']'
+++ cat /tmp/tmp.kIsUW1d2Zs
+++ cat /tmp/tmp.TZ5MH3u5pP
command terminated with exit code 1
+++ sleep 8
+++ cat /tmp/tmp.kIsUW1d2Zs
+++ cat /tmp/tmp.TZ5MH3u5pP
command terminated with exit code 1
+++ rm /tmp/tmp.kIsUW1d2Zs /tmp/tmp.TZ5MH3u5pP
+++ return 1
++ for node_id in '"${nodeList[@]}"'
++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-0 -- pmm-admin --server-url=https://admin:admin@$(get_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}'))
+++ grep /node_id/4a4b9313-e33b-4cc8-bd79-a597d67a1164
+++ awk '{print $4}'
++++ get_service_ip monitoring-service
++++ local service=monitoring-service
++++ local server_type=rs0
+++++ kubectl_bin get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}'
++++++ mktemp
+++++ local LAST_OUT=/tmp/tmp.vhLecJ34Ew
++++++ mktemp
+++++ local LAST_ERR=/tmp/tmp.y3bbGXE2xe
+++++ local exit_status=0
+++++ local timeout=4
++++++ seq 0 2
+++++ for i in '$(seq 0 2)'
+++++ set +e
+++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}'
+++++ exit_status=1
+++++ set -e
+++++ '[' 1 '!=' 0 -a -n 1 ']'
+++++ cat /tmp/tmp.vhLecJ34Ew
+++++ cat /tmp/tmp.y3bbGXE2xe
Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found
+++++ sleep 0
+++++ for i in '$(seq 0 2)'
+++++ set +e
+++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}'
+++++ exit_status=1
+++++ set -e
+++++ '[' 1 '!=' 0 -a -n 1 ']'
+++++ cat /tmp/tmp.vhLecJ34Ew
+++++ cat /tmp/tmp.y3bbGXE2xe
Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found
+++++ sleep 4
+++++ for i in '$(seq 0 2)'
+++++ set +e
+++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}'
+++++ exit_status=1
+++++ set -e
+++++ '[' 1 '!=' 0 -a -n 1 ']'
+++++ cat /tmp/tmp.vhLecJ34Ew
+++++ cat /tmp/tmp.y3bbGXE2xe
Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found
+++++ sleep 8
+++++ cat /tmp/tmp.vhLecJ34Ew
+++++ cat /tmp/tmp.y3bbGXE2xe
Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found
+++++ rm /tmp/tmp.vhLecJ34Ew /tmp/tmp.y3bbGXE2xe
+++++ return 1
++++ '[' '' '!=' true ']'
++++ echo -n monitoring-service.monitoring-service-rs0
++++ return
+++ kubectl_bin exec -n monitoring-2-0-21005 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE
++++ mktemp
+++ local LAST_OUT=/tmp/tmp.fTaNWVaHw4
++++ mktemp
+++ local LAST_ERR=/tmp/tmp.GbcsGGXP5q
+++ local exit_status=0
+++ local timeout=4
++++ seq 0 2
+++ for i in '$(seq 0 2)'
+++ set +e
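get_service_ip, traced twice above, decides how the test addresses the PMM server. Because the psmdb resource is already deleted, the expose.enabled lookup fails, the comparison sees an empty string, and the function falls back to the in-cluster DNS name. A rough reconstruction of the branch exercised here (the exposed-service branch is an assumption about what the suite would do when expose.enabled is true):

# Sketch: return the headless-service DNS name unless the replset is exposed.
get_service_ip() {
	local service=$1
	local server_type=${2:-rs0}
	if [ "$(kubectl_bin get psmdb/${service} -o 'jsonpath={.spec.replsets[].expose.enabled}')" != 'true' ]; then
		echo -n "${service}.${service}-${server_type}"
		return
	fi
	# assumed: with expose enabled, resolve the service's external address
	kubectl_bin get service/${service} -o 'jsonpath={.status.loadBalancer.ingress[]}'
}

That fallback is why every pmm-admin call below targets monitoring-service.monitoring-service-rs0 and then fails: the name no longer resolves once the cluster is gone.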
+++ kubectl exec -n monitoring-2-0-21005 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE
+++ exit_status=1
+++ set -e
+++ '[' 1 '!=' 0 -a -n 1 ']'
+++ cat /tmp/tmp.fTaNWVaHw4
+++ cat /tmp/tmp.GbcsGGXP5q
command terminated with exit code 1
+++ sleep 0
+++ for i in '$(seq 0 2)'
+++ set +e
+++ kubectl exec -n monitoring-2-0-21005 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE
+++ exit_status=1
+++ set -e
+++ '[' 1 '!=' 0 -a -n 1 ']'
+++ cat /tmp/tmp.fTaNWVaHw4
+++ cat /tmp/tmp.GbcsGGXP5q
command terminated with exit code 1
+++ sleep 4
+++ for i in '$(seq 0 2)'
+++ set +e
+++ kubectl exec -n monitoring-2-0-21005 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE
+++ exit_status=1
+++ set -e
+++ '[' 1 '!=' 0 -a -n 1 ']'
+++ cat /tmp/tmp.fTaNWVaHw4
+++ cat /tmp/tmp.GbcsGGXP5q
command terminated with exit code 1
+++ sleep 8
+++ cat /tmp/tmp.fTaNWVaHw4
+++ cat /tmp/tmp.GbcsGGXP5q
command terminated with exit code 1
+++ rm /tmp/tmp.fTaNWVaHw4 /tmp/tmp.GbcsGGXP5q
+++ return 1
++ echo
+ [[ -n '' ]]
++ kubectl_bin logs monitoring-rs0-0 pmm-client
++ grep -c 'cannot auto discover databases and collections'
+++ mktemp
++ local LAST_OUT=/tmp/tmp.pgfc95RihT
+++ mktemp
++ local LAST_ERR=/tmp/tmp.HkuX50jdCo
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in '$(seq 0 2)'
++ set +e
++ kubectl logs monitoring-rs0-0 pmm-client
++ exit_status=1
++ set -e
++ '[' 1 '!=' 0 -a -n 1 ']'
++ cat /tmp/tmp.pgfc95RihT
++ cat /tmp/tmp.HkuX50jdCo
error: error from server (NotFound): pods "monitoring-rs0-0" not found in namespace "monitoring-2-0-21005"
++ sleep 0
++ for i in '$(seq 0 2)'
++ set +e
++ kubectl logs monitoring-rs0-0 pmm-client
++ exit_status=1
++ set -e
++ '[' 1 '!=' 0 -a -n 1 ']'
++ cat /tmp/tmp.pgfc95RihT
++ cat /tmp/tmp.HkuX50jdCo
error: error from server (NotFound): pods "monitoring-rs0-0" not found in namespace "monitoring-2-0-21005"
++ sleep 4
++ for i in '$(seq 0 2)'
++ set +e
++ kubectl logs monitoring-rs0-0 pmm-client
++ exit_status=1
++ set -e
++ '[' 1 '!=' 0 -a -n 1 ']'
++ cat /tmp/tmp.pgfc95RihT
++ cat /tmp/tmp.HkuX50jdCo
error: error from server (NotFound): pods "monitoring-rs0-0" not found in namespace "monitoring-2-0-21005"
++ sleep 8
++ cat /tmp/tmp.pgfc95RihT
++ cat /tmp/tmp.HkuX50jdCo
error: error from server (NotFound): pods "monitoring-rs0-0" not found in namespace "monitoring-2-0-21005"
++ rm /tmp/tmp.pgfc95RihT /tmp/tmp.HkuX50jdCo
++ return 1
+ [[ 0 != 0 ]]
+ desc 'check for passwords leak'
+ set +o xtrace
-----------------------------------------------------------------------------------
check for passwords leak
-----------------------------------------------------------------------------------
+ check_passwords_leak
+ local secrets
+ local passwords
+ local pods
++ jq -r '.items[].data | to_entries | .[] | select(.key | (contains("_PASSWORD"))) | .value'
++ kubectl_bin get secrets -o json
+++ mktemp
++ local LAST_OUT=/tmp/tmp.G7NcYuzoAU
+++ mktemp
++ local LAST_ERR=/tmp/tmp.5dm1SuGiHC
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in '$(seq 0 2)'
++ set +e
++ kubectl get secrets -o json
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 -a -n 1 ']'
++ break
++ cat /tmp/tmp.G7NcYuzoAU
++ cat /tmp/tmp.5dm1SuGiHC
++ rm /tmp/tmp.G7NcYuzoAU /tmp/tmp.5dm1SuGiHC
++ return 0
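check_passwords_leak starts by pulling every *_PASSWORD value out of all secrets in the namespace. The jq filter in the trace is worth unpacking; standalone, the step amounts to the following (key names such as MONGODB_BACKUP_PASSWORD are examples of what the psmdb users secret contains):

# Collect the (still base64-encoded) values of all secret keys whose name
# contains _PASSWORD, across every secret in the current namespace.
secrets=$(kubectl get secrets -o json \
	| jq -r '.items[].data | to_entries | .[] | select(.key | contains("_PASSWORD")) | .value')

Because Kubernetes stores secret data base64-encoded, the values captured here are encoded strings, which is why the assignment below is full of base64 text.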
+ secrets='YmFja3VwMTIzNDU2 Y2x1c3RlckFkbWluMTIzNDU2 Y2x1c3Rlck1vbml0b3IxMjM0NTY= ZGF0YWJhc2VBZG1pbjEyMzQ1Ng== dXNlckFkbWluMTIzNDU2 YmFja3VwMTIzNDU2 Y2x1c3RlckFkbWluMTIzNDU2 Y2x1c3Rlck1vbml0b3IxMjM0NTY= ZGF0YWJhc2VBZG1pbjEyMzQ1Ng== dXNlckFkbWluMTIzNDU2'
+ echo secrets=YmFja3VwMTIzNDU2 Y2x1c3RlckFkbWluMTIzNDU2 Y2x1c3Rlck1vbml0b3IxMjM0NTY= ZGF0YWJhc2VBZG1pbjEyMzQ1Ng== dXNlckFkbWluMTIzNDU2 YmFja3VwMTIzNDU2 Y2x1c3RlckFkbWluMTIzNDU2 Y2x1c3Rlck1vbml0b3IxMjM0NTY= ZGF0YWJhc2VBZG1pbjEyMzQ1Ng== dXNlckFkbWluMTIzNDU2
secrets=YmFja3VwMTIzNDU2 Y2x1c3RlckFkbWluMTIzNDU2 Y2x1c3Rlck1vbml0b3IxMjM0NTY= ZGF0YWJhc2VBZG1pbjEyMzQ1Ng== dXNlckFkbWluMTIzNDU2 YmFja3VwMTIzNDU2 Y2x1c3RlckFkbWluMTIzNDU2 Y2x1c3Rlck1vbml0b3IxMjM0NTY= ZGF0YWJhc2VBZG1pbjEyMzQ1Ng== dXNlckFkbWluMTIzNDU2
++ for i in '$secrets'
++ base64 -d
++ echo
++ for i in '$secrets'
++ base64 -d
++ echo
++ for i in '$secrets'
++ base64 -d
++ echo
++ for i in '$secrets'
++ base64 -d
++ echo
++ for i in '$secrets'
++ base64 -d
++ echo
++ for i in '$secrets'
++ base64 -d
++ echo
++ for i in '$secrets'
++ base64 -d
++ echo
++ for i in '$secrets'
++ base64 -d
++ echo
++ for i in '$secrets'
++ base64 -d
++ echo
++ for i in '$secrets'
++ base64 -d
++ echo
+ passwords='backup123456 clusterAdmin123456 clusterMonitor123456 databaseAdmin123456 userAdmin123456 backup123456 clusterAdmin123456 clusterMonitor123456 databaseAdmin123456 userAdmin123456 YmFja3VwMTIzNDU2 Y2x1c3RlckFkbWluMTIzNDU2 Y2x1c3Rlck1vbml0b3IxMjM0NTY= ZGF0YWJhc2VBZG1pbjEyMzQ1Ng== dXNlckFkbWluMTIzNDU2 YmFja3VwMTIzNDU2 Y2x1c3RlckFkbWluMTIzNDU2 Y2x1c3Rlck1vbml0b3IxMjM0NTY= ZGF0YWJhc2VBZG1pbjEyMzQ1Ng== dXNlckFkbWluMTIzNDU2'
+ echo passwords=backup123456 clusterAdmin123456 clusterMonitor123456 databaseAdmin123456 userAdmin123456 backup123456 clusterAdmin123456 clusterMonitor123456 databaseAdmin123456 userAdmin123456 YmFja3VwMTIzNDU2 Y2x1c3RlckFkbWluMTIzNDU2 Y2x1c3Rlck1vbml0b3IxMjM0NTY= ZGF0YWJhc2VBZG1pbjEyMzQ1Ng== dXNlckFkbWluMTIzNDU2 YmFja3VwMTIzNDU2 Y2x1c3RlckFkbWluMTIzNDU2 Y2x1c3Rlck1vbml0b3IxMjM0NTY= ZGF0YWJhc2VBZG1pbjEyMzQ1Ng== dXNlckFkbWluMTIzNDU2
passwords=backup123456 clusterAdmin123456 clusterMonitor123456 databaseAdmin123456 userAdmin123456 backup123456 clusterAdmin123456 clusterMonitor123456 databaseAdmin123456 userAdmin123456 YmFja3VwMTIzNDU2 Y2x1c3RlckFkbWluMTIzNDU2 Y2x1c3Rlck1vbml0b3IxMjM0NTY= ZGF0YWJhc2VBZG1pbjEyMzQ1Ng== dXNlckFkbWluMTIzNDU2 YmFja3VwMTIzNDU2 Y2x1c3RlckFkbWluMTIzNDU2 Y2x1c3Rlck1vbml0b3IxMjM0NTY= ZGF0YWJhc2VBZG1pbjEyMzQ1Ng== dXNlckFkbWluMTIzNDU2
++ kubectl_bin get pods -o name
++ awk -F / '{print $2}'
+++ mktemp
++ local LAST_OUT=/tmp/tmp.nGN6WqUe4i
+++ mktemp
++ local LAST_ERR=/tmp/tmp.7dEZAtakx7
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in '$(seq 0 2)'
++ set +e
++ kubectl get pods -o name
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 -a -n 1 ']'
++ break
++ cat /tmp/tmp.nGN6WqUe4i
++ cat /tmp/tmp.7dEZAtakx7
++ rm /tmp/tmp.nGN6WqUe4i /tmp/tmp.7dEZAtakx7
++ return 0
+ pods='monitoring-0 psmdb-client-6cd48df8b6-vv5md'
+ echo pods=monitoring-0 psmdb-client-6cd48df8b6-vv5md
pods=monitoring-0 psmdb-client-6cd48df8b6-vv5md
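Each collected value is then decoded, and both forms are kept so the scan below catches a leak of either the plaintext password or its base64 encoding. The unquoted echo statements are what flatten the lists onto the single lines seen above. A condensed reconstruction of the decode step, assuming the loop shape implied by the trace:

# Decode every base64 value; keep the encoded originals alongside the
# plaintext so both representations are searched for in the logs.
passwords="$(for i in $secrets; do echo "$i" | base64 -d; echo; done) $secrets"

The trailing echo inside the loop restores the newline that base64 -d does not emit, which is why each decoded password lands on its own line before the list is flattened.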
+ collect_logs monitoring-2-0-21005
+ local containers
+ local count
+ NS=monitoring-2-0-21005
+ for p in '$pods'
++ kubectl_bin -n monitoring-2-0-21005 get pod monitoring-0 -o 'jsonpath={.spec.containers[*].name}'
+++ mktemp
++ local LAST_OUT=/tmp/tmp.8W7LovjMUc
+++ mktemp
++ local LAST_ERR=/tmp/tmp.6TjjgPlbVR
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in '$(seq 0 2)'
++ set +e
++ kubectl -n monitoring-2-0-21005 get pod monitoring-0 -o 'jsonpath={.spec.containers[*].name}'
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 -a -n 1 ']'
++ break
++ cat /tmp/tmp.8W7LovjMUc
++ cat /tmp/tmp.6TjjgPlbVR
++ rm /tmp/tmp.8W7LovjMUc /tmp/tmp.6TjjgPlbVR
++ return 0
+ containers=monitoring
+ for c in '$containers'
+ [[ monitoring =~ pmm ]]
+ kubectl_bin -n monitoring-2-0-21005 logs monitoring-0 -c monitoring
++ mktemp
+ local LAST_OUT=/tmp/tmp.mwHBrrevGT
++ mktemp
+ local LAST_ERR=/tmp/tmp.aJ1Z4ClZqP
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl -n monitoring-2-0-21005 logs monitoring-0 -c monitoring
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.mwHBrrevGT
+ cat /tmp/tmp.aJ1Z4ClZqP
+ rm /tmp/tmp.mwHBrrevGT /tmp/tmp.aJ1Z4ClZqP
+ return 0
+ echo logs saved in: /tmp/tmp.D0vv3Eolup/logs_output-monitoring-0-monitoring.txt
logs saved in: /tmp/tmp.D0vv3Eolup/logs_output-monitoring-0-monitoring.txt
+ for pass in '$passwords'
++ grep -c --fixed-strings -- backup123456 /tmp/tmp.D0vv3Eolup/logs_output-monitoring-0-monitoring.txt
++ :
+ count=0
+ [[ 0 != 0 ]]
+ for pass in '$passwords'
++ grep -c --fixed-strings -- clusterAdmin123456 /tmp/tmp.D0vv3Eolup/logs_output-monitoring-0-monitoring.txt
++ :
+ count=0
+ [[ 0 != 0 ]]
+ for pass in '$passwords'
++ grep -c --fixed-strings -- clusterMonitor123456 /tmp/tmp.D0vv3Eolup/logs_output-monitoring-0-monitoring.txt
++ :
+ count=0
+ [[ 0 != 0 ]]
+ for pass in '$passwords'
++ grep -c --fixed-strings -- databaseAdmin123456 /tmp/tmp.D0vv3Eolup/logs_output-monitoring-0-monitoring.txt
++ :
+ count=0
+ [[ 0 != 0 ]]
+ for pass in '$passwords'
++ grep -c --fixed-strings -- userAdmin123456 /tmp/tmp.D0vv3Eolup/logs_output-monitoring-0-monitoring.txt
++ :
+ count=0
+ [[ 0 != 0 ]]
+ for pass in '$passwords'
++ grep -c --fixed-strings -- backup123456 /tmp/tmp.D0vv3Eolup/logs_output-monitoring-0-monitoring.txt
++ :
+ count=0
+ [[ 0 != 0 ]]
+ for pass in '$passwords'
++ grep -c --fixed-strings -- clusterAdmin123456 /tmp/tmp.D0vv3Eolup/logs_output-monitoring-0-monitoring.txt
++ :
+ count=0
+ [[ 0 != 0 ]]
+ for pass in '$passwords'
++ grep -c --fixed-strings -- clusterMonitor123456 /tmp/tmp.D0vv3Eolup/logs_output-monitoring-0-monitoring.txt
++ :
+ count=0
+ [[ 0 != 0 ]]
+ for pass in '$passwords'
++ grep -c --fixed-strings -- databaseAdmin123456 /tmp/tmp.D0vv3Eolup/logs_output-monitoring-0-monitoring.txt
++ :
+ count=0
+ [[ 0 != 0 ]]
+ for pass in '$passwords'
++ grep -c --fixed-strings -- userAdmin123456 /tmp/tmp.D0vv3Eolup/logs_output-monitoring-0-monitoring.txt
++ :
+ count=0
+ [[ 0 != 0 ]]
+ for pass in '$passwords'
++ grep -c --fixed-strings -- YmFja3VwMTIzNDU2 /tmp/tmp.D0vv3Eolup/logs_output-monitoring-0-monitoring.txt
++ :
+ count=0
+ [[ 0 != 0 ]]
+ for pass in '$passwords'
++ grep -c --fixed-strings -- Y2x1c3RlckFkbWluMTIzNDU2 /tmp/tmp.D0vv3Eolup/logs_output-monitoring-0-monitoring.txt
++ :
+ count=0
+ [[ 0 != 0 ]]
+ for pass in '$passwords'
++ grep -c --fixed-strings -- Y2x1c3Rlck1vbml0b3IxMjM0NTY= /tmp/tmp.D0vv3Eolup/logs_output-monitoring-0-monitoring.txt
++ :
+ count=0
+ [[ 0 != 0 ]]
+ for pass in '$passwords'
++ grep -c --fixed-strings -- ZGF0YWJhc2VBZG1pbjEyMzQ1Ng== /tmp/tmp.D0vv3Eolup/logs_output-monitoring-0-monitoring.txt
++ :
+ count=0
+ [[ 0 != 0 ]]
+ for pass in '$passwords'
++ grep -c --fixed-strings -- dXNlckFkbWluMTIzNDU2 /tmp/tmp.D0vv3Eolup/logs_output-monitoring-0-monitoring.txt
++ :
+ count=0
+ [[ 0 != 0 ]]
+ for pass in '$passwords'
++ grep -c --fixed-strings -- YmFja3VwMTIzNDU2 /tmp/tmp.D0vv3Eolup/logs_output-monitoring-0-monitoring.txt
++ :
+ count=0
+ [[ 0 != 0 ]]
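The scan itself is a fixed-string count of each password over the saved log file. Two details carry the weight here: the -- stops grep from parsing a password that begins with a dash as an option, and the bare : after each grep keeps set -e from aborting the test, since grep -c exits 1 when it finds zero matches. In isolation (the $logfile variable and the failure message are illustrative, not the suite's exact names):

# Fail if any password, plaintext or base64, appears in the collected log.
for pass in $passwords; do
	count=$(grep -c --fixed-strings -- "$pass" "$logfile" || :)
	if [[ $count != 0 ]]; then
		echo "password leak detected in $logfile"
		exit 1
	fi
done

Every [[ 0 != 0 ]] in the trace is that final comparison evaluating clean, i.e. no leak found in this file.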
+ for pass in '$passwords'
++ grep -c --fixed-strings -- Y2x1c3RlckFkbWluMTIzNDU2 /tmp/tmp.D0vv3Eolup/logs_output-monitoring-0-monitoring.txt
++ :
+ count=0
+ [[ 0 != 0 ]]
+ for pass in '$passwords'
++ grep -c --fixed-strings -- Y2x1c3Rlck1vbml0b3IxMjM0NTY= /tmp/tmp.D0vv3Eolup/logs_output-monitoring-0-monitoring.txt
++ :
+ count=0
+ [[ 0 != 0 ]]
+ for pass in '$passwords'
++ grep -c --fixed-strings -- ZGF0YWJhc2VBZG1pbjEyMzQ1Ng== /tmp/tmp.D0vv3Eolup/logs_output-monitoring-0-monitoring.txt
++ :
+ count=0
+ [[ 0 != 0 ]]
+ for pass in '$passwords'
++ grep -c --fixed-strings -- dXNlckFkbWluMTIzNDU2 /tmp/tmp.D0vv3Eolup/logs_output-monitoring-0-monitoring.txt
++ :
+ count=0
+ [[ 0 != 0 ]]
+ echo
+ for p in '$pods'
++ kubectl_bin -n monitoring-2-0-21005 get pod psmdb-client-6cd48df8b6-vv5md -o 'jsonpath={.spec.containers[*].name}'
+++ mktemp
++ local LAST_OUT=/tmp/tmp.9kfRbrha9u
+++ mktemp
++ local LAST_ERR=/tmp/tmp.MGQR7j0b7Z
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in '$(seq 0 2)'
++ set +e
++ kubectl -n monitoring-2-0-21005 get pod psmdb-client-6cd48df8b6-vv5md -o 'jsonpath={.spec.containers[*].name}'
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 -a -n 1 ']'
++ break
++ cat /tmp/tmp.9kfRbrha9u
++ cat /tmp/tmp.MGQR7j0b7Z
++ rm /tmp/tmp.9kfRbrha9u /tmp/tmp.MGQR7j0b7Z
++ return 0
+ containers=psmdb-client
+ for c in '$containers'
+ [[ psmdb-client =~ pmm ]]
+ kubectl_bin -n monitoring-2-0-21005 logs psmdb-client-6cd48df8b6-vv5md -c psmdb-client
++ mktemp
+ local LAST_OUT=/tmp/tmp.y8AmjPsWy0
++ mktemp
+ local LAST_ERR=/tmp/tmp.mGRnoh1UVj
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl -n monitoring-2-0-21005 logs psmdb-client-6cd48df8b6-vv5md -c psmdb-client
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.y8AmjPsWy0
+ cat /tmp/tmp.mGRnoh1UVj
+ rm /tmp/tmp.y8AmjPsWy0 /tmp/tmp.mGRnoh1UVj
+ return 0
+ echo logs saved in: /tmp/tmp.D0vv3Eolup/logs_output-psmdb-client-6cd48df8b6-vv5md-psmdb-client.txt
logs saved in: /tmp/tmp.D0vv3Eolup/logs_output-psmdb-client-6cd48df8b6-vv5md-psmdb-client.txt
+ for pass in '$passwords'
++ grep -c --fixed-strings -- backup123456 /tmp/tmp.D0vv3Eolup/logs_output-psmdb-client-6cd48df8b6-vv5md-psmdb-client.txt
++ :
+ count=0
+ [[ 0 != 0 ]]
+ for pass in '$passwords'
++ grep -c --fixed-strings -- clusterAdmin123456 /tmp/tmp.D0vv3Eolup/logs_output-psmdb-client-6cd48df8b6-vv5md-psmdb-client.txt
++ :
+ count=0
+ [[ 0 != 0 ]]
+ for pass in '$passwords'
++ grep -c --fixed-strings -- clusterMonitor123456 /tmp/tmp.D0vv3Eolup/logs_output-psmdb-client-6cd48df8b6-vv5md-psmdb-client.txt
++ :
+ count=0
+ [[ 0 != 0 ]]
+ for pass in '$passwords'
++ grep -c --fixed-strings -- databaseAdmin123456 /tmp/tmp.D0vv3Eolup/logs_output-psmdb-client-6cd48df8b6-vv5md-psmdb-client.txt
++ :
+ count=0
+ [[ 0 != 0 ]]
+ for pass in '$passwords'
++ grep -c --fixed-strings -- userAdmin123456 /tmp/tmp.D0vv3Eolup/logs_output-psmdb-client-6cd48df8b6-vv5md-psmdb-client.txt
++ :
+ count=0
+ [[ 0 != 0 ]]
+ for pass in '$passwords'
++ grep -c --fixed-strings -- backup123456 /tmp/tmp.D0vv3Eolup/logs_output-psmdb-client-6cd48df8b6-vv5md-psmdb-client.txt
++ :
+ count=0
+ [[ 0 != 0 ]]
+ for pass in '$passwords'
++ grep -c --fixed-strings -- clusterAdmin123456 /tmp/tmp.D0vv3Eolup/logs_output-psmdb-client-6cd48df8b6-vv5md-psmdb-client.txt
++ :
+ count=0
+ [[ 0 != 0 ]]
+ for pass in '$passwords'
++ grep -c --fixed-strings -- clusterMonitor123456 /tmp/tmp.D0vv3Eolup/logs_output-psmdb-client-6cd48df8b6-vv5md-psmdb-client.txt
++ :
+ count=0
+ [[ 0 != 0 ]]
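collect_logs walks every pod and every container, and both lists are discovered the same way each time in the trace: pod names via kubectl get pods -o name with the pod/ prefix stripped, container names via a JSONPath over the pod spec. The [[ $c =~ pmm ]] test seen before each dump appears to skip pmm-client sidecar containers; it is omitted in this condensed sketch, and $logdir stands in for the suite's temp directory under /tmp/tmp.D0vv3Eolup:

# Enumerate pods, then per-pod containers, and save each container's log.
pods=$(kubectl get pods -o name | awk -F / '{print $2}')
for p in $pods; do
	containers=$(kubectl -n "$NS" get pod "$p" -o 'jsonpath={.spec.containers[*].name}')
	for c in $containers; do
		kubectl -n "$NS" logs "$p" -c "$c" > "$logdir/logs_output-$p-$c.txt"
	done
done

The saved file names, echoed as "logs saved in: ...", are exactly the paths the leak scan greps afterwards.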
+ for pass in '$passwords'
++ grep -c --fixed-strings -- databaseAdmin123456 /tmp/tmp.D0vv3Eolup/logs_output-psmdb-client-6cd48df8b6-vv5md-psmdb-client.txt
++ :
+ count=0
+ [[ 0 != 0 ]]
+ for pass in '$passwords'
++ grep -c --fixed-strings -- userAdmin123456 /tmp/tmp.D0vv3Eolup/logs_output-psmdb-client-6cd48df8b6-vv5md-psmdb-client.txt
++ :
+ count=0
+ [[ 0 != 0 ]]
+ for pass in '$passwords'
++ grep -c --fixed-strings -- YmFja3VwMTIzNDU2 /tmp/tmp.D0vv3Eolup/logs_output-psmdb-client-6cd48df8b6-vv5md-psmdb-client.txt
++ :
+ count=0
+ [[ 0 != 0 ]]
+ for pass in '$passwords'
++ grep -c --fixed-strings -- Y2x1c3RlckFkbWluMTIzNDU2 /tmp/tmp.D0vv3Eolup/logs_output-psmdb-client-6cd48df8b6-vv5md-psmdb-client.txt
++ :
+ count=0
+ [[ 0 != 0 ]]
+ for pass in '$passwords'
++ grep -c --fixed-strings -- Y2x1c3Rlck1vbml0b3IxMjM0NTY= /tmp/tmp.D0vv3Eolup/logs_output-psmdb-client-6cd48df8b6-vv5md-psmdb-client.txt
++ :
+ count=0
+ [[ 0 != 0 ]]
+ for pass in '$passwords'
++ grep -c --fixed-strings -- ZGF0YWJhc2VBZG1pbjEyMzQ1Ng== /tmp/tmp.D0vv3Eolup/logs_output-psmdb-client-6cd48df8b6-vv5md-psmdb-client.txt
++ :
+ count=0
+ [[ 0 != 0 ]]
+ for pass in '$passwords'
++ grep -c --fixed-strings -- dXNlckFkbWluMTIzNDU2 /tmp/tmp.D0vv3Eolup/logs_output-psmdb-client-6cd48df8b6-vv5md-psmdb-client.txt
++ :
+ count=0
+ [[ 0 != 0 ]]
+ for pass in '$passwords'
++ grep -c --fixed-strings -- YmFja3VwMTIzNDU2 /tmp/tmp.D0vv3Eolup/logs_output-psmdb-client-6cd48df8b6-vv5md-psmdb-client.txt
++ :
+ count=0
+ [[ 0 != 0 ]]
+ for pass in '$passwords'
++ grep -c --fixed-strings -- Y2x1c3RlckFkbWluMTIzNDU2 /tmp/tmp.D0vv3Eolup/logs_output-psmdb-client-6cd48df8b6-vv5md-psmdb-client.txt
++ :
+ count=0
+ [[ 0 != 0 ]]
+ for pass in '$passwords'
++ grep -c --fixed-strings -- Y2x1c3Rlck1vbml0b3IxMjM0NTY= /tmp/tmp.D0vv3Eolup/logs_output-psmdb-client-6cd48df8b6-vv5md-psmdb-client.txt
++ :
+ count=0
+ [[ 0 != 0 ]]
+ for pass in '$passwords'
++ grep -c --fixed-strings -- ZGF0YWJhc2VBZG1pbjEyMzQ1Ng== /tmp/tmp.D0vv3Eolup/logs_output-psmdb-client-6cd48df8b6-vv5md-psmdb-client.txt
++ :
+ count=0
+ [[ 0 != 0 ]]
+ for pass in '$passwords'
++ grep -c --fixed-strings -- dXNlckFkbWluMTIzNDU2 /tmp/tmp.D0vv3Eolup/logs_output-psmdb-client-6cd48df8b6-vv5md-psmdb-client.txt
++ :
+ count=0
+ [[ 0 != 0 ]]
+ echo
+ '[' -n psmdb-operator ']'
++ kubectl_bin -n psmdb-operator get pods -o name
+++ mktemp
++ awk -F / '{print $2}'
++ local LAST_OUT=/tmp/tmp.YEG9VHJbp1
+++ mktemp
++ local LAST_ERR=/tmp/tmp.EJ44UJDgIw
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in '$(seq 0 2)'
++ set +e
++ kubectl -n psmdb-operator get pods -o name
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 -a -n 1 ']'
++ break
++ cat /tmp/tmp.YEG9VHJbp1
++ cat /tmp/tmp.EJ44UJDgIw
++ rm /tmp/tmp.YEG9VHJbp1 /tmp/tmp.EJ44UJDgIw
++ return 0
+ pods=percona-server-mongodb-operator-7dd65664b8-c6ddc
+ collect_logs psmdb-operator
+ local containers
+ local count
+ NS=psmdb-operator
+ for p in '$pods'
++ kubectl_bin -n psmdb-operator get pod percona-server-mongodb-operator-7dd65664b8-c6ddc -o 'jsonpath={.spec.containers[*].name}'
+++ mktemp
++ local LAST_OUT=/tmp/tmp.2j2qGKlRNX
+++ mktemp
++ local LAST_ERR=/tmp/tmp.Iqkfi9Brzo
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in '$(seq 0 2)'
++ set +e
++ kubectl -n psmdb-operator get pod percona-server-mongodb-operator-7dd65664b8-c6ddc -o 'jsonpath={.spec.containers[*].name}'
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 -a -n 1 ']'
++ break
++ cat /tmp/tmp.2j2qGKlRNX
++ cat /tmp/tmp.Iqkfi9Brzo
++ rm /tmp/tmp.2j2qGKlRNX /tmp/tmp.Iqkfi9Brzo
++ return 0
+ containers=percona-server-mongodb-operator
+ for c in '$containers'
+ [[ percona-server-mongodb-operator =~ pmm ]]
+ kubectl_bin -n psmdb-operator logs percona-server-mongodb-operator-7dd65664b8-c6ddc -c percona-server-mongodb-operator
++ mktemp
+ local LAST_OUT=/tmp/tmp.kae7xRbJdb
++ mktemp
+ local LAST_ERR=/tmp/tmp.FcyVyMAcQP
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl -n psmdb-operator logs percona-server-mongodb-operator-7dd65664b8-c6ddc -c percona-server-mongodb-operator
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.kae7xRbJdb
+ cat /tmp/tmp.FcyVyMAcQP
+ rm /tmp/tmp.kae7xRbJdb /tmp/tmp.FcyVyMAcQP
+ return 0
+ echo logs saved in: /tmp/tmp.D0vv3Eolup/logs_output-percona-server-mongodb-operator-7dd65664b8-c6ddc-percona-server-mongodb-operator.txt
logs saved in: /tmp/tmp.D0vv3Eolup/logs_output-percona-server-mongodb-operator-7dd65664b8-c6ddc-percona-server-mongodb-operator.txt
+ for pass in '$passwords'
++ grep -c --fixed-strings -- backup123456 /tmp/tmp.D0vv3Eolup/logs_output-percona-server-mongodb-operator-7dd65664b8-c6ddc-percona-server-mongodb-operator.txt
++ :
+ count=0
+ [[ 0 != 0 ]]
+ for pass in '$passwords'
++ grep -c --fixed-strings -- clusterAdmin123456 /tmp/tmp.D0vv3Eolup/logs_output-percona-server-mongodb-operator-7dd65664b8-c6ddc-percona-server-mongodb-operator.txt
++ :
+ count=0
+ [[ 0 != 0 ]]
+ for pass in '$passwords'
++ grep -c --fixed-strings -- clusterMonitor123456 /tmp/tmp.D0vv3Eolup/logs_output-percona-server-mongodb-operator-7dd65664b8-c6ddc-percona-server-mongodb-operator.txt
++ :
+ count=0
+ [[ 0 != 0 ]]
+ for pass in '$passwords'
++ grep -c --fixed-strings -- databaseAdmin123456 /tmp/tmp.D0vv3Eolup/logs_output-percona-server-mongodb-operator-7dd65664b8-c6ddc-percona-server-mongodb-operator.txt
++ :
+ count=0
+ [[ 0 != 0 ]]
+ for pass in '$passwords'
++ grep -c --fixed-strings -- userAdmin123456 /tmp/tmp.D0vv3Eolup/logs_output-percona-server-mongodb-operator-7dd65664b8-c6ddc-percona-server-mongodb-operator.txt
++ :
+ count=0
+ [[ 0 != 0 ]]
+ for pass in '$passwords'
++ grep -c --fixed-strings -- backup123456 /tmp/tmp.D0vv3Eolup/logs_output-percona-server-mongodb-operator-7dd65664b8-c6ddc-percona-server-mongodb-operator.txt
++ :
+ count=0
+ [[ 0 != 0 ]]
+ for pass in '$passwords'
++ grep -c --fixed-strings -- clusterAdmin123456 /tmp/tmp.D0vv3Eolup/logs_output-percona-server-mongodb-operator-7dd65664b8-c6ddc-percona-server-mongodb-operator.txt
++ :
+ count=0
+ [[ 0 != 0 ]]
+ for pass in '$passwords'
++ grep -c --fixed-strings -- clusterMonitor123456 /tmp/tmp.D0vv3Eolup/logs_output-percona-server-mongodb-operator-7dd65664b8-c6ddc-percona-server-mongodb-operator.txt
++ :
+ count=0
+ [[ 0 != 0 ]]
+ for pass in '$passwords'
++ grep -c --fixed-strings -- databaseAdmin123456 /tmp/tmp.D0vv3Eolup/logs_output-percona-server-mongodb-operator-7dd65664b8-c6ddc-percona-server-mongodb-operator.txt
++ :
+ count=0
+ [[ 0 != 0 ]]
+ for pass in '$passwords'
++ grep -c --fixed-strings -- userAdmin123456 /tmp/tmp.D0vv3Eolup/logs_output-percona-server-mongodb-operator-7dd65664b8-c6ddc-percona-server-mongodb-operator.txt
++ :
+ count=0
+ [[ 0 != 0 ]]
+ for pass in '$passwords'
++ grep -c --fixed-strings -- YmFja3VwMTIzNDU2 /tmp/tmp.D0vv3Eolup/logs_output-percona-server-mongodb-operator-7dd65664b8-c6ddc-percona-server-mongodb-operator.txt
++ :
+ count=0
+ [[ 0 != 0 ]]
+ for pass in '$passwords'
++ grep -c --fixed-strings -- Y2x1c3RlckFkbWluMTIzNDU2 /tmp/tmp.D0vv3Eolup/logs_output-percona-server-mongodb-operator-7dd65664b8-c6ddc-percona-server-mongodb-operator.txt
++ :
+ count=0
+ [[ 0 != 0 ]]
+ for pass in '$passwords'
++ grep -c --fixed-strings -- Y2x1c3Rlck1vbml0b3IxMjM0NTY= /tmp/tmp.D0vv3Eolup/logs_output-percona-server-mongodb-operator-7dd65664b8-c6ddc-percona-server-mongodb-operator.txt
++ :
+ count=0
+ [[ 0 != 0 ]]
+ for pass in '$passwords'
++ grep -c --fixed-strings -- ZGF0YWJhc2VBZG1pbjEyMzQ1Ng== /tmp/tmp.D0vv3Eolup/logs_output-percona-server-mongodb-operator-7dd65664b8-c6ddc-percona-server-mongodb-operator.txt
++ :
+ count=0
+ [[ 0 != 0 ]]
+ for pass in '$passwords'
++ grep -c --fixed-strings -- dXNlckFkbWluMTIzNDU2 /tmp/tmp.D0vv3Eolup/logs_output-percona-server-mongodb-operator-7dd65664b8-c6ddc-percona-server-mongodb-operator.txt
++ :
+ count=0
+ [[ 0 != 0 ]]
+ for pass in '$passwords'
++ grep -c --fixed-strings -- YmFja3VwMTIzNDU2 /tmp/tmp.D0vv3Eolup/logs_output-percona-server-mongodb-operator-7dd65664b8-c6ddc-percona-server-mongodb-operator.txt
++ :
+ count=0
+ [[ 0 != 0 ]]
+ for pass in '$passwords'
++ grep -c --fixed-strings -- Y2x1c3RlckFkbWluMTIzNDU2 /tmp/tmp.D0vv3Eolup/logs_output-percona-server-mongodb-operator-7dd65664b8-c6ddc-percona-server-mongodb-operator.txt
++ :
+ count=0
+ [[ 0 != 0 ]]
+ for pass in '$passwords'
++ grep -c --fixed-strings -- Y2x1c3Rlck1vbml0b3IxMjM0NTY= /tmp/tmp.D0vv3Eolup/logs_output-percona-server-mongodb-operator-7dd65664b8-c6ddc-percona-server-mongodb-operator.txt
++ :
+ count=0
+ [[ 0 != 0 ]]
+ for pass in '$passwords'
++ grep -c --fixed-strings -- ZGF0YWJhc2VBZG1pbjEyMzQ1Ng== /tmp/tmp.D0vv3Eolup/logs_output-percona-server-mongodb-operator-7dd65664b8-c6ddc-percona-server-mongodb-operator.txt
++ :
+ count=0
+ [[ 0 != 0 ]]
+ for pass in '$passwords'
++ grep -c --fixed-strings -- dXNlckFkbWluMTIzNDU2 /tmp/tmp.D0vv3Eolup/logs_output-percona-server-mongodb-operator-7dd65664b8-c6ddc-percona-server-mongodb-operator.txt
++ :
+ count=0
+ [[ 0 != 0 ]]
+ echo
+ helm uninstall monitoring
release "monitoring" uninstalled
+ destroy monitoring-2-0-21005
+ local namespace=monitoring-2-0-21005
+ local ignore_logs=true
+ desc 'destroy cluster/operator and all other resources'
+ set +o xtrace
-----------------------------------------------------------------------------------
destroy cluster/operator and all other resources
-----------------------------------------------------------------------------------
+ '[' true == false ']'
+ delete_crd
+ desc 'get and delete old CRDs and RBAC'
+ set +o xtrace
-----------------------------------------------------------------------------------
get and delete old CRDs and RBAC
-----------------------------------------------------------------------------------
+ kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1578/deploy/crd.yaml --ignore-not-found --wait=false
++ mktemp
+ local LAST_OUT=/tmp/tmp.y6cFlzwGsr
++ mktemp
+ local LAST_ERR=/tmp/tmp.v8IqsPe3B1
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1578/deploy/crd.yaml --ignore-not-found --wait=false
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.y6cFlzwGsr
customresourcedefinition.apiextensions.k8s.io "perconaservermongodbbackups.psmdb.percona.com" deleted
customresourcedefinition.apiextensions.k8s.io "perconaservermongodbrestores.psmdb.percona.com" deleted
customresourcedefinition.apiextensions.k8s.io "perconaservermongodbs.psmdb.percona.com" deleted
+ cat /tmp/tmp.v8IqsPe3B1
+ rm /tmp/tmp.y6cFlzwGsr /tmp/tmp.v8IqsPe3B1
+ return 0
++ yq eval .metadata.name /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1578/deploy/crd.yaml
++ grep -v '\-\-\-'
+ for crd_name in '$(yq eval '\''.metadata.name'\'' "${src_dir}/deploy/crd.yaml" | grep -v '\''\-\-\-'\'')'
+ xargs -L 1 sh -xc 'kubectl patch perconaservermongodbbackups.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"'
+ grep -v NAMESPACE
+ kubectl get perconaservermongodbbackups.psmdb.percona.com --all-namespaces -o wide
error: the server doesn't have a resource type "perconaservermongodbbackups"
+ kubectl patch perconaservermongodbbackups.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}'
error: the server doesn't have a resource type "perconaservermongodbbackups"
+ :
+ kubectl_bin wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com
++ mktemp
+ local LAST_OUT=/tmp/tmp.zASIgCbGWE
++ mktemp
+ local LAST_ERR=/tmp/tmp.f56ul2yGds
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.zASIgCbGWE
+ cat /tmp/tmp.f56ul2yGds
+ rm /tmp/tmp.zASIgCbGWE /tmp/tmp.f56ul2yGds
+ return 0
+ for crd_name in '$(yq eval '\''.metadata.name'\'' "${src_dir}/deploy/crd.yaml" | grep -v '\''\-\-\-'\'')'
+ kubectl get perconaservermongodbrestores.psmdb.percona.com --all-namespaces -o wide
+ grep -v NAMESPACE
+ xargs -L 1 sh -xc 'kubectl patch perconaservermongodbrestores.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"'
error: the server doesn't have a resource type "perconaservermongodbrestores"
+ kubectl patch perconaservermongodbrestores.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}'
error: the server doesn't have a resource type "perconaservermongodbrestores"
+ :
+ kubectl_bin wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com
++ mktemp
+ local LAST_OUT=/tmp/tmp.w6V8KSUXGt
++ mktemp
+ local LAST_ERR=/tmp/tmp.4bryMvHPTD
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.w6V8KSUXGt
+ cat /tmp/tmp.4bryMvHPTD
+ rm /tmp/tmp.w6V8KSUXGt /tmp/tmp.4bryMvHPTD
+ return 0
+ for crd_name in '$(yq eval '\''.metadata.name'\'' "${src_dir}/deploy/crd.yaml" | grep -v '\''\-\-\-'\'')'
+ xargs -L 1 sh -xc 'kubectl patch perconaservermongodbs.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"'
+ kubectl get perconaservermongodbs.psmdb.percona.com --all-namespaces -o wide
+ grep -v NAMESPACE
error: the server doesn't have a resource type "perconaservermongodbs"
+ kubectl patch perconaservermongodbs.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}'
error: the server doesn't have a resource type "perconaservermongodbs"
+ :
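The delete_crd pattern traced above deserves a note: deleting a CRD hangs if any remaining custom resource still carries finalizers, so for each CRD the suite first empties metadata.finalizers on every leftover object and only then waits for the CRD itself to disappear. The not-found errors are the happy path here, since "no resources left" is exactly the desired state. Reduced to its core for one CRD:

# Strip finalizers from any leftover CRs in all namespaces (columns 1 and 2
# of the listing are namespace and name, passed to sh as $0 and $1), then
# block until the CRD is gone. Failures are tolerated with || : because an
# empty listing means there is nothing left to patch.
kubectl get perconaservermongodbs.psmdb.percona.com --all-namespaces -o wide \
	| grep -v NAMESPACE \
	| xargs -L 1 sh -xc 'kubectl patch perconaservermongodbs.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' || :
kubectl wait --for=delete crd perconaservermongodbs.psmdb.percona.com

One quirk visible in the trace: when the listing is empty, xargs still runs the command once with no arguments, and $0 inside sh -c then defaults to the shell name, which is why the log shows a nonsensical patch attempt against namespace "sh".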
+ kubectl_bin wait --for=delete crd perconaservermongodbs.psmdb.percona.com
++ mktemp
+ local LAST_OUT=/tmp/tmp.XFMu2Q5sgp
++ mktemp
+ local LAST_ERR=/tmp/tmp.0DlttOv9Rm
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl wait --for=delete crd perconaservermongodbs.psmdb.percona.com
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.XFMu2Q5sgp
+ cat /tmp/tmp.0DlttOv9Rm
+ rm /tmp/tmp.XFMu2Q5sgp /tmp/tmp.0DlttOv9Rm
+ return 0
+ local rbac_yaml=rbac.yaml
+ '[' -n psmdb-operator ']'
+ rbac_yaml=cw-rbac.yaml
+ kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1578/deploy/cw-rbac.yaml --ignore-not-found
++ mktemp
+ local LAST_OUT=/tmp/tmp.rqtxtal002
++ mktemp
+ local LAST_ERR=/tmp/tmp.MMzkoB6oN6
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1578/deploy/cw-rbac.yaml --ignore-not-found
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.rqtxtal002
clusterrole.rbac.authorization.k8s.io "percona-server-mongodb-operator" deleted
clusterrolebinding.rbac.authorization.k8s.io "service-account-percona-server-mongodb-operator" deleted
+ cat /tmp/tmp.MMzkoB6oN6
+ rm /tmp/tmp.rqtxtal002 /tmp/tmp.MMzkoB6oN6
+ return 0
+ destroy_cert_manager
+ kubectl_bin delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml
++ mktemp
+ local LAST_OUT=/tmp/tmp.clYyQPilRC
++ mktemp
+ local LAST_ERR=/tmp/tmp.uNfdeZsNbT
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml
+ exit_status=1
+ set -e
+ '[' 1 '!=' 0 -a -n 1 ']'
+ cat /tmp/tmp.clYyQPilRC
namespace "cert-manager" deleted customresourcedefinition.apiextensions.k8s.io "certificaterequests.cert-manager.io" deleted customresourcedefinition.apiextensions.k8s.io "certificates.cert-manager.io" deleted customresourcedefinition.apiextensions.k8s.io "challenges.acme.cert-manager.io" deleted customresourcedefinition.apiextensions.k8s.io "clusterissuers.cert-manager.io" deleted customresourcedefinition.apiextensions.k8s.io "issuers.cert-manager.io" deleted customresourcedefinition.apiextensions.k8s.io "orders.acme.cert-manager.io" deleted serviceaccount "cert-manager-cainjector" deleted serviceaccount "cert-manager" deleted serviceaccount "cert-manager-webhook" deleted clusterrole.rbac.authorization.k8s.io "cert-manager-cainjector" deleted clusterrole.rbac.authorization.k8s.io "cert-manager-controller-issuers" deleted clusterrole.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" deleted clusterrole.rbac.authorization.k8s.io "cert-manager-controller-certificates" deleted clusterrole.rbac.authorization.k8s.io "cert-manager-controller-orders" deleted clusterrole.rbac.authorization.k8s.io "cert-manager-controller-challenges" deleted clusterrole.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" deleted clusterrole.rbac.authorization.k8s.io "cert-manager-cluster-view" deleted clusterrole.rbac.authorization.k8s.io "cert-manager-view" deleted clusterrole.rbac.authorization.k8s.io "cert-manager-edit" deleted clusterrole.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" deleted clusterrole.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" deleted clusterrole.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" deleted clusterrolebinding.rbac.authorization.k8s.io "cert-manager-cainjector" deleted clusterrolebinding.rbac.authorization.k8s.io "cert-manager-controller-issuers" deleted clusterrolebinding.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" deleted clusterrolebinding.rbac.authorization.k8s.io "cert-manager-controller-certificates" deleted clusterrolebinding.rbac.authorization.k8s.io "cert-manager-controller-orders" deleted
clusterrolebinding.rbac.authorization.k8s.io "cert-manager-controller-challenges" deleted clusterrolebinding.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" deleted clusterrolebinding.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" deleted clusterrolebinding.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" deleted clusterrolebinding.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" deleted role.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" deleted role.rbac.authorization.k8s.io "cert-manager:leaderelection" deleted role.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" deleted rolebinding.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" deleted rolebinding.rbac.authorization.k8s.io "cert-manager:leaderelection" deleted rolebinding.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" deleted mutatingwebhookconfiguration.admissionregistration.k8s.io "cert-manager-webhook" deleted validatingwebhookconfiguration.admissionregistration.k8s.io "cert-manager-webhook" deleted + cat /tmp/tmp.uNfdeZsNbT Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": services "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": services "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": deployments.apps "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": deployments.apps "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": deployments.apps "cert-manager-webhook" not found + sleep 0 + for i in '$(seq 0 2)' + set +e + kubectl delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.clYyQPilRC namespace "cert-manager" deleted + cat /tmp/tmp.uNfdeZsNbT Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificaterequests.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificates.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "challenges.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "clusterissuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "issuers.cert-manager.io" not found Error 
from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "orders.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": serviceaccounts "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": serviceaccounts "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": serviceaccounts "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cluster-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-edit" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting 
"https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting 
"https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": services "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": services "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": deployments.apps "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": deployments.apps "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": deployments.apps "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": mutatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": validatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found + sleep 4 + for i in '$(seq 0 2)' + set +e + kubectl delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.clYyQPilRC + cat /tmp/tmp.uNfdeZsNbT Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": namespaces "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificaterequests.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificates.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "challenges.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "clusterissuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "issuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": 
customresourcedefinitions.apiextensions.k8s.io "orders.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": serviceaccounts "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": serviceaccounts "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": serviceaccounts "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cluster-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-edit" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io 
"cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error 
from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": services "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": services "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": deployments.apps "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": deployments.apps "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": deployments.apps "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": mutatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": validatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found + sleep 8 + cat /tmp/tmp.clYyQPilRC + cat /tmp/tmp.uNfdeZsNbT Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": namespaces "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificaterequests.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificates.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "challenges.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "clusterissuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "issuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "orders.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": serviceaccounts "cert-manager-cainjector" not found Error from server (NotFound): error when deleting 
"https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": serviceaccounts "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": serviceaccounts "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cluster-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-edit" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting 
"https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting 
"https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": services "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": services "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": deployments.apps "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": deployments.apps "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": deployments.apps "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": mutatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": validatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found + rm /tmp/tmp.clYyQPilRC /tmp/tmp.uNfdeZsNbT + return 1 + true + '[' -n '' ']' + '[' -n psmdb-operator ']' + rm -rf /tmp/tmp.D0vv3Eolup + kubectl_bin delete --grace-period=0 --force=true namespace monitoring-2-0-21005 + kubectl_bin delete --grace-period=0 --force=true namespace psmdb-operator ++ mktemp ++ mktemp + local LAST_OUT=/tmp/tmp.68JGFim9RG + local LAST_OUT=/tmp/tmp.k5hPF0ds5H + desc 'test passed' + set +o xtrace ----------------------------------------------------------------------------------- test passed ----------------------------------------------------------------------------------- ++ mktemp ++ mktemp + local LAST_ERR=/tmp/tmp.bJKyJRE326 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete --grace-period=0 --force=true namespace monitoring-2-0-21005 + local LAST_ERR=/tmp/tmp.VDDl4sb1M8 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete --grace-period=0 --force=true namespace psmdb-operator