Log: /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1993/e2e-tests/logs/monitoring-pmm3.log WARNING: version difference between client (1.33) and server (1.30) exceeds the supported minor version skew of +/-1 WARNING: version difference between client (1.33) and server (1.30) exceeds the supported minor version skew of +/-1 WARNING: version difference between client (1.33) and server (1.30) exceeds the supported minor version skew of +/-1 + create_infra monitoring-pmm3-23926 + local ns=monitoring-pmm3-23926 + [[ 1 == 1 ]] + delete_crd + desc 'get and delete old CRDs and RBAC' + set +o xtrace ----------------------------------------------------------------------------------- get and delete old CRDs and RBAC ----------------------------------------------------------------------------------- + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1993/deploy/crd.yaml --ignore-not-found --wait=false ++ mktemp + local LAST_OUT=/tmp/tmp.NhPUwcz83l ++ mktemp + local LAST_ERR=/tmp/tmp.v7cJyeTUtf + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1993/deploy/crd.yaml --ignore-not-found --wait=false + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.NhPUwcz83l + cat /tmp/tmp.v7cJyeTUtf + rm /tmp/tmp.NhPUwcz83l /tmp/tmp.v7cJyeTUtf + return 0 ++ grep -v '\-\-\-' ++ yq eval .metadata.name /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1993/deploy/crd.yaml + for crd_name in '$(yq eval '\''.metadata.name'\'' "${src_dir}/deploy/crd.yaml" | grep -v '\''\-\-\-'\'')' + kubectl get perconaservermongodbbackups.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbbackups.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbbackups" + kubectl patch perconaservermongodbbackups.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbbackups" + : + kubectl_bin wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.E7ssr7jTmS ++ mktemp + local LAST_ERR=/tmp/tmp.smYHUHoLhc + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.E7ssr7jTmS + cat /tmp/tmp.smYHUHoLhc + rm /tmp/tmp.E7ssr7jTmS /tmp/tmp.smYHUHoLhc + return 0 + for crd_name in '$(yq eval '\''.metadata.name'\'' "${src_dir}/deploy/crd.yaml" | grep -v '\''\-\-\-'\'')' + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbrestores.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' + kubectl get perconaservermongodbrestores.psmdb.percona.com --all-namespaces -o wide error: the server doesn't have a resource type "perconaservermongodbrestores" + kubectl patch perconaservermongodbrestores.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbrestores" + : + kubectl_bin wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.TBVLU5SPVo ++ mktemp + local LAST_ERR=/tmp/tmp.8tFVsIhBLs + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait 
--for=delete crd perconaservermongodbrestores.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.TBVLU5SPVo + cat /tmp/tmp.8tFVsIhBLs + rm /tmp/tmp.TBVLU5SPVo /tmp/tmp.8tFVsIhBLs + return 0 + for crd_name in '$(yq eval '\''.metadata.name'\'' "${src_dir}/deploy/crd.yaml" | grep -v '\''\-\-\-'\'')' + grep -v NAMESPACE + kubectl get perconaservermongodbs.psmdb.percona.com --all-namespaces -o wide + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbs.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbs" + kubectl patch perconaservermongodbs.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbs" + : + kubectl_bin wait --for=delete crd perconaservermongodbs.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.twFj0yiP4I ++ mktemp + local LAST_ERR=/tmp/tmp.YB4lV0imlK + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=delete crd perconaservermongodbs.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.twFj0yiP4I + cat /tmp/tmp.YB4lV0imlK + rm /tmp/tmp.twFj0yiP4I /tmp/tmp.YB4lV0imlK + return 0 + local rbac_yaml=rbac.yaml + '[' -n psmdb-operator ']' + rbac_yaml=cw-rbac.yaml + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1993/deploy/cw-rbac.yaml --ignore-not-found ++ mktemp + local LAST_OUT=/tmp/tmp.aY7VknbILQ ++ mktemp + local LAST_ERR=/tmp/tmp.cevYJiyk1G + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1993/deploy/cw-rbac.yaml --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.aY7VknbILQ + cat /tmp/tmp.cevYJiyk1G + rm /tmp/tmp.aY7VknbILQ /tmp/tmp.cevYJiyk1G + return 0 + check_crd_for_deletion PR-1993-9d545650 + local git_tag=PR-1993-9d545650 ++ /usr/bin/sed ':a;N;$!ba;s/\n/ /g' ++ /usr/bin/sed s/---//g ++ yq eval .metadata.name ++ curl -s https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/PR-1993-9d545650/deploy/crd.yaml + for crd_name in '$(curl -s https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/${git_tag}/deploy/crd.yaml | yq eval '\''.metadata.name'\'' | $sed '\''s/---//g'\'' | $sed '\'':a;N;$!ba;s/\n/ /g'\'')' ++ kubectl_bin get crd/null -o 'jsonpath={.status.conditions[-1].type}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.OfG15BqNbA +++ mktemp ++ local LAST_ERR=/tmp/tmp.7uoBE2vUHf ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}' ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.OfG15BqNbA ++ cat /tmp/tmp.7uoBE2vUHf Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ sleep 0 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}' ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.OfG15BqNbA ++ cat /tmp/tmp.7uoBE2vUHf Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ sleep 4 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}' ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.OfG15BqNbA ++ cat /tmp/tmp.7uoBE2vUHf 
Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ sleep 8 ++ cat /tmp/tmp.OfG15BqNbA ++ cat /tmp/tmp.7uoBE2vUHf Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ rm /tmp/tmp.OfG15BqNbA /tmp/tmp.7uoBE2vUHf ++ return 1 + [[ '' == \T\e\r\m\i\n\a\t\i\n\g ]] + '[' -n psmdb-operator ']' + create_namespace psmdb-operator + local namespace=psmdb-operator + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + destroy_chaos_mesh ++ sed s/NAMESPACE// ++ awk '-F ' '{print $2}' ++ helm list --all-namespaces --filter chaos-mesh ++ tail -n1 + local chaos_mesh_ns= + desc 'destroy chaos-mesh' + set +o xtrace ----------------------------------------------------------------------------------- destroy chaos-mesh ----------------------------------------------------------------------------------- + '[' -n '' ']' ++ kubectl get MutatingWebhookConfiguration ++ awk '{print $1}' ++ grep chaos-mesh + timeout 30 kubectl delete MutatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ awk '{print $1}' ++ kubectl get ValidatingWebhookConfiguration ++ grep chaos-mesh + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ grep validate-auth ++ kubectl get ValidatingWebhookConfiguration ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl api-resources ++ grep chaos-mesh ++ awk '{print $1}' ++ kubectl get crd ++ grep chaos-mesh.org ++ awk '{print $1}' + timeout 30 kubectl delete crd error: resource(s) were provided, but no name was specified + : ++ awk '{print $1}' ++ grep chaos-mesh ++ kubectl get clusterrolebinding + timeout 30 kubectl delete clusterrolebinding error: resource(s) were provided, but no name was specified + : ++ awk '{print $1}' ++ grep chaos-mesh ++ kubectl get clusterrole + timeout 30 kubectl delete clusterrole error: resource(s) were provided, but no name was specified + : + desc 'cleaned up all old namespaces' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up all old namespaces ----------------------------------------------------------------------------------- + '[' -n '' ']' + desc 'cleaned up old namespaces psmdb-operator' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up old namespaces psmdb-operator ----------------------------------------------------------------------------------- + kubectl_bin delete namespace psmdb-operator --ignore-not-found + kubectl_bin get ns + egrep -v '^kube-|^default|Terminating|psmdb-operator|openshift|^gke-|^gmp-|^NAME' + awk '{print$1}' + xargs kubectl delete ns ++ mktemp + local LAST_OUT=/tmp/tmp.oWxb0WaEq1 ++ mktemp + local LAST_OUT=/tmp/tmp.VHhpy3vFfC ++ mktemp ++ mktemp + local LAST_ERR=/tmp/tmp.0zwA5eRvBH + local exit_status=0 + local timeout=4 + local LAST_ERR=/tmp/tmp.QegDY9TtXs + local exit_status=0 + local timeout=4 ++ seq 0 2 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get ns + for i in '$(seq 0 2)' + set +e + kubectl delete namespace psmdb-operator --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.VHhpy3vFfC + cat /tmp/tmp.QegDY9TtXs + rm /tmp/tmp.VHhpy3vFfC /tmp/tmp.QegDY9TtXs + return 0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.oWxb0WaEq1 + cat 
/tmp/tmp.0zwA5eRvBH + rm /tmp/tmp.oWxb0WaEq1 /tmp/tmp.0zwA5eRvBH + return 0 + kubectl_bin wait --for=delete namespace psmdb-operator ++ mktemp error: resource(s) were provided, but no name was specified + local LAST_OUT=/tmp/tmp.9mvfxSFC4t ++ mktemp + local LAST_ERR=/tmp/tmp.K4PpW91MLh + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=delete namespace psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.9mvfxSFC4t + cat /tmp/tmp.K4PpW91MLh + rm /tmp/tmp.9mvfxSFC4t /tmp/tmp.K4PpW91MLh + return 0 + desc 'create namespace psmdb-operator' + set +o xtrace ----------------------------------------------------------------------------------- create namespace psmdb-operator ----------------------------------------------------------------------------------- + kubectl_bin create namespace psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.8ISDefvXZR ++ mktemp + local LAST_ERR=/tmp/tmp.k7unBCNLpb + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl create namespace psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.8ISDefvXZR namespace/psmdb-operator created + cat /tmp/tmp.k7unBCNLpb + rm /tmp/tmp.8ISDefvXZR /tmp/tmp.k7unBCNLpb + return 0 + set_kube_ctx psmdb-operator + local namespace=psmdb-operator ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.eyWcZcPBnW +++ mktemp ++ local LAST_ERR=/tmp/tmp.3oaCrK9nIa ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.eyWcZcPBnW ++ cat /tmp/tmp.3oaCrK9nIa ++ rm /tmp/tmp.eyWcZcPBnW /tmp/tmp.3oaCrK9nIa ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-1993-9d545650-2-cluster2 --namespace=psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.Va3KL57Xo7 ++ mktemp + local LAST_ERR=/tmp/tmp.EdE7T1IyIq + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-1993-9d545650-2-cluster2 --namespace=psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Va3KL57Xo7 Context "gke_cloud-dev-112233_us-central1-a_jen-psmdb-1993-9d545650-2-cluster2" modified. 
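Every kubectl_bin call traced above follows the same retry pattern: stdout and stderr are captured to mktemp files, the raw kubectl command is retried up to three times with growing sleeps, and the capture files are printed and removed before returning. A minimal sketch of such a wrapper, reconstructed from the xtrace output (the helper's real body is not shown in this log, so the exact code is an assumption):

kubectl_bin() {
        local LAST_OUT LAST_ERR exit_status=0 timeout=4
        LAST_OUT=$(mktemp)
        LAST_ERR=$(mktemp)
        for i in $(seq 0 2); do
                set +e
                kubectl "$@" >"$LAST_OUT" 2>"$LAST_ERR"
                exit_status=$?
                set -e
                if [ "$exit_status" != 0 ]; then
                        # on failure, show what happened and back off 0, 4, 8 seconds (as seen in the trace)
                        cat "$LAST_OUT" "$LAST_ERR"
                        sleep $((i * timeout))
                        continue
                fi
                break
        done
        cat "$LAST_OUT" "$LAST_ERR"
        rm -f "$LAST_OUT" "$LAST_ERR"
        return "$exit_status"
}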
+ cat /tmp/tmp.EdE7T1IyIq + rm /tmp/tmp.Va3KL57Xo7 /tmp/tmp.EdE7T1IyIq + return 0 + deploy_operator + desc 'start PSMDB operator' + set +o xtrace ----------------------------------------------------------------------------------- start PSMDB operator ----------------------------------------------------------------------------------- + local cr_file + '[' -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1993/e2e-tests/monitoring-pmm3/conf/crd.yaml ']' + cr_file=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1993/deploy/crd.yaml + kubectl_bin apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1993/deploy/crd.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.jt8psvCohm ++ mktemp + local LAST_ERR=/tmp/tmp.UyR09Gbby7 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1993/deploy/crd.yaml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.jt8psvCohm customresourcedefinition.apiextensions.k8s.io/perconaservermongodbbackups.psmdb.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaservermongodbrestores.psmdb.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaservermongodbs.psmdb.percona.com serverside-applied + cat /tmp/tmp.UyR09Gbby7 + rm /tmp/tmp.jt8psvCohm /tmp/tmp.UyR09Gbby7 + return 0 + '[' -n psmdb-operator ']' + apply_rbac cw-rbac + local operator_namespace=psmdb-operator + local rbac=cw-rbac + kubectl_bin apply -n psmdb-operator -f - + sed -e 's^namespace: .*^namespace: psmdb-operator^' + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1993/deploy/cw-rbac.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.PrTaM4vOLu ++ mktemp + local LAST_ERR=/tmp/tmp.8g7nSPOco0 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -n psmdb-operator -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.PrTaM4vOLu clusterrole.rbac.authorization.k8s.io/percona-server-mongodb-operator created serviceaccount/percona-server-mongodb-operator created clusterrolebinding.rbac.authorization.k8s.io/service-account-percona-server-mongodb-operator created + cat /tmp/tmp.8g7nSPOco0 + rm /tmp/tmp.PrTaM4vOLu /tmp/tmp.8g7nSPOco0 + return 0 + kubectl_bin apply -f - ++ mktemp + yq eval ' (.spec.template.spec.containers[].image = "perconalab/percona-server-mongodb-operator:PR-1993-9d545650") | ((.. | select(.[] == "DISABLE_TELEMETRY")) |= .value="true") | ((.. 
| select(.[] == "LOG_LEVEL")) |= .value="DEBUG")' /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1993/deploy/cw-operator.yaml + local LAST_OUT=/tmp/tmp.xuGDIhReTS ++ mktemp + local LAST_ERR=/tmp/tmp.t9JDYMZXIK + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.xuGDIhReTS deployment.apps/percona-server-mongodb-operator created + cat /tmp/tmp.t9JDYMZXIK + rm /tmp/tmp.xuGDIhReTS /tmp/tmp.t9JDYMZXIK + return 0 + sleep 2 ++ get_operator_pod ++ kubectl_bin get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.mJo2uncBpO +++ mktemp ++ local LAST_ERR=/tmp/tmp.w0cmqELe2m ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.mJo2uncBpO ++ cat /tmp/tmp.w0cmqELe2m ++ rm /tmp/tmp.mJo2uncBpO /tmp/tmp.w0cmqELe2m ++ return 0 + wait_pod percona-server-mongodb-operator-7dbb56857b-66vlr + local pod=percona-server-mongodb-operator-7dbb56857b-66vlr + set +o xtrace waiting for pod/percona-server-mongodb-operator-7dbb56857b-66vlr to be ready.OK + echo 'Print operator info from log' Print operator info from log + grep 'Manager starting up' ++ get_operator_pod ++ kubectl_bin get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.M4udDHVqge +++ mktemp ++ local LAST_ERR=/tmp/tmp.IAeRiWS7R1 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.M4udDHVqge ++ cat /tmp/tmp.IAeRiWS7R1 ++ rm /tmp/tmp.M4udDHVqge /tmp/tmp.IAeRiWS7R1 ++ return 0 + kubectl_bin logs percona-server-mongodb-operator-7dbb56857b-66vlr ++ mktemp + local LAST_OUT=/tmp/tmp.PMiAoe2P9G ++ mktemp + local LAST_ERR=/tmp/tmp.rMVYfAK8q6 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl logs percona-server-mongodb-operator-7dbb56857b-66vlr + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.PMiAoe2P9G + cat /tmp/tmp.rMVYfAK8q6 + rm /tmp/tmp.PMiAoe2P9G /tmp/tmp.rMVYfAK8q6 + return 0 2025-07-04T02:22:34.605Z INFO setup Manager starting up {"gitCommit": "9d5456508def1bb97ffa7da79b450ff41a98b5fb", "gitBranch": "PR-1993-9d545650", "buildTime": "", "goVersion": "go1.24.4", "os": "linux", "arch": "amd64"} + create_namespace monitoring-pmm3-23926 + local namespace=monitoring-pmm3-23926 + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + destroy_chaos_mesh ++ tail -n1 ++ awk '-F ' '{print $2}' ++ sed s/NAMESPACE// ++ helm list --all-namespaces --filter chaos-mesh + local chaos_mesh_ns= + desc 'destroy chaos-mesh' + set +o xtrace ----------------------------------------------------------------------------------- destroy chaos-mesh ----------------------------------------------------------------------------------- + '[' -n '' ']' ++ awk '{print $1}' ++ grep chaos-mesh ++ kubectl get MutatingWebhookConfiguration + timeout 30 kubectl delete MutatingWebhookConfiguration error: resource(s) were provided, but 
no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ awk '{print $1}' ++ grep validate-auth ++ kubectl get ValidatingWebhookConfiguration + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl api-resources ++ grep chaos-mesh ++ awk '{print $1}' ++ grep chaos-mesh.org ++ kubectl get crd ++ awk '{print $1}' + timeout 30 kubectl delete crd error: resource(s) were provided, but no name was specified + : ++ grep chaos-mesh ++ kubectl get clusterrolebinding ++ awk '{print $1}' + timeout 30 kubectl delete clusterrolebinding error: resource(s) were provided, but no name was specified + : ++ awk '{print $1}' ++ grep chaos-mesh ++ kubectl get clusterrole + timeout 30 kubectl delete clusterrole error: resource(s) were provided, but no name was specified + : + desc 'cleaned up all old namespaces' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up all old namespaces ----------------------------------------------------------------------------------- + '[' -n '' ']' + desc 'cleaned up old namespaces monitoring-pmm3-23926' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up old namespaces monitoring-pmm3-23926 ----------------------------------------------------------------------------------- + kubectl_bin delete namespace monitoring-pmm3-23926 --ignore-not-found + awk '{print$1}' + xargs kubectl delete ns + kubectl_bin get ns + egrep -v '^kube-|^default|Terminating|psmdb-operator|openshift|^gke-|^gmp-|^NAME' ++ mktemp ++ mktemp + local LAST_OUT=/tmp/tmp.dXFOt1IIbz ++ mktemp + local LAST_OUT=/tmp/tmp.wK9o9XCzvS + local LAST_ERR=/tmp/tmp.e537ExiSzP + local exit_status=0 + local timeout=4 ++ mktemp + local LAST_ERR=/tmp/tmp.bFKmlvKi0K + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete namespace monitoring-pmm3-23926 --ignore-not-found ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get ns + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.wK9o9XCzvS + cat /tmp/tmp.bFKmlvKi0K + rm /tmp/tmp.wK9o9XCzvS /tmp/tmp.bFKmlvKi0K + return 0 error: resource(s) were provided, but no name was specified + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.dXFOt1IIbz + cat /tmp/tmp.e537ExiSzP + rm /tmp/tmp.dXFOt1IIbz /tmp/tmp.e537ExiSzP + return 0 + kubectl_bin wait --for=delete namespace monitoring-pmm3-23926 ++ mktemp + local LAST_OUT=/tmp/tmp.ob6lfishBQ ++ mktemp + local LAST_ERR=/tmp/tmp.VdNwa2bUjk + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=delete namespace monitoring-pmm3-23926 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.ob6lfishBQ + cat /tmp/tmp.VdNwa2bUjk + rm /tmp/tmp.ob6lfishBQ /tmp/tmp.VdNwa2bUjk + return 0 + desc 'create namespace monitoring-pmm3-23926' + set +o xtrace ----------------------------------------------------------------------------------- create namespace monitoring-pmm3-23926 ----------------------------------------------------------------------------------- + kubectl_bin create namespace monitoring-pmm3-23926 ++ mktemp + local LAST_OUT=/tmp/tmp.XOJ1W1zMvT ++ mktemp + local LAST_ERR=/tmp/tmp.G5z54ykjm5 + local exit_status=0 + local 
timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl create namespace monitoring-pmm3-23926 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.XOJ1W1zMvT namespace/monitoring-pmm3-23926 created + cat /tmp/tmp.G5z54ykjm5 + rm /tmp/tmp.XOJ1W1zMvT /tmp/tmp.G5z54ykjm5 + return 0 + set_kube_ctx monitoring-pmm3-23926 + local namespace=monitoring-pmm3-23926 ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.Cs8aDbBYUS +++ mktemp ++ local LAST_ERR=/tmp/tmp.VVtQtU7Lbs ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Cs8aDbBYUS ++ cat /tmp/tmp.VVtQtU7Lbs ++ rm /tmp/tmp.Cs8aDbBYUS /tmp/tmp.VVtQtU7Lbs ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-1993-9d545650-2-cluster2 --namespace=monitoring-pmm3-23926 ++ mktemp + local LAST_OUT=/tmp/tmp.MxnmAiagoC ++ mktemp + local LAST_ERR=/tmp/tmp.pMTwCxB5Vz + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-1993-9d545650-2-cluster2 --namespace=monitoring-pmm3-23926 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.MxnmAiagoC Context "gke_cloud-dev-112233_us-central1-a_jen-psmdb-1993-9d545650-2-cluster2" modified. + cat /tmp/tmp.pMTwCxB5Vz + rm /tmp/tmp.MxnmAiagoC /tmp/tmp.pMTwCxB5Vz + return 0 + deploy_cert_manager + desc 'deploy cert manager' + set +o xtrace ----------------------------------------------------------------------------------- deploy cert manager ----------------------------------------------------------------------------------- + kubectl_bin create namespace cert-manager ++ mktemp + local LAST_OUT=/tmp/tmp.HJ9OUqdNrz ++ mktemp + local LAST_ERR=/tmp/tmp.dIGjRIaFN2 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl create namespace cert-manager + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.HJ9OUqdNrz namespace/cert-manager created + cat /tmp/tmp.dIGjRIaFN2 + rm /tmp/tmp.HJ9OUqdNrz /tmp/tmp.dIGjRIaFN2 + return 0 + kubectl_bin label namespace cert-manager certmanager.k8s.io/disable-validation=true ++ mktemp + local LAST_OUT=/tmp/tmp.O3ZIo3uAHL ++ mktemp + local LAST_ERR=/tmp/tmp.xJgghU1eLd + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl label namespace cert-manager certmanager.k8s.io/disable-validation=true + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.O3ZIo3uAHL namespace/cert-manager labeled + cat /tmp/tmp.xJgghU1eLd + rm /tmp/tmp.O3ZIo3uAHL /tmp/tmp.xJgghU1eLd + return 0 + kubectl_bin apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml --validate=false ++ mktemp + local LAST_OUT=/tmp/tmp.FWulBmCpWV ++ mktemp + local LAST_ERR=/tmp/tmp.WGOk5E7P3O + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml --validate=false + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.FWulBmCpWV namespace/cert-manager configured customresourcedefinition.apiextensions.k8s.io/certificaterequests.cert-manager.io created customresourcedefinition.apiextensions.k8s.io/certificates.cert-manager.io created 
customresourcedefinition.apiextensions.k8s.io/challenges.acme.cert-manager.io created customresourcedefinition.apiextensions.k8s.io/clusterissuers.cert-manager.io created customresourcedefinition.apiextensions.k8s.io/issuers.cert-manager.io created customresourcedefinition.apiextensions.k8s.io/orders.acme.cert-manager.io created serviceaccount/cert-manager-cainjector created serviceaccount/cert-manager created serviceaccount/cert-manager-webhook created clusterrole.rbac.authorization.k8s.io/cert-manager-cainjector created clusterrole.rbac.authorization.k8s.io/cert-manager-controller-issuers created clusterrole.rbac.authorization.k8s.io/cert-manager-controller-clusterissuers created clusterrole.rbac.authorization.k8s.io/cert-manager-controller-certificates created clusterrole.rbac.authorization.k8s.io/cert-manager-controller-orders created clusterrole.rbac.authorization.k8s.io/cert-manager-controller-challenges created clusterrole.rbac.authorization.k8s.io/cert-manager-controller-ingress-shim created clusterrole.rbac.authorization.k8s.io/cert-manager-cluster-view created clusterrole.rbac.authorization.k8s.io/cert-manager-view created clusterrole.rbac.authorization.k8s.io/cert-manager-edit created clusterrole.rbac.authorization.k8s.io/cert-manager-controller-approve:cert-manager-io created clusterrole.rbac.authorization.k8s.io/cert-manager-controller-certificatesigningrequests created clusterrole.rbac.authorization.k8s.io/cert-manager-webhook:subjectaccessreviews created clusterrolebinding.rbac.authorization.k8s.io/cert-manager-cainjector created clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-issuers created clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-clusterissuers created clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-certificates created clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-orders created clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-challenges created clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-ingress-shim created clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-approve:cert-manager-io created clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-certificatesigningrequests created clusterrolebinding.rbac.authorization.k8s.io/cert-manager-webhook:subjectaccessreviews created role.rbac.authorization.k8s.io/cert-manager-cainjector:leaderelection created role.rbac.authorization.k8s.io/cert-manager:leaderelection created role.rbac.authorization.k8s.io/cert-manager-tokenrequest created role.rbac.authorization.k8s.io/cert-manager-webhook:dynamic-serving created rolebinding.rbac.authorization.k8s.io/cert-manager-cainjector:leaderelection created rolebinding.rbac.authorization.k8s.io/cert-manager:leaderelection created rolebinding.rbac.authorization.k8s.io/cert-manager-cert-manager-tokenrequest created rolebinding.rbac.authorization.k8s.io/cert-manager-webhook:dynamic-serving created service/cert-manager-cainjector created service/cert-manager created service/cert-manager-webhook created deployment.apps/cert-manager-cainjector created deployment.apps/cert-manager created deployment.apps/cert-manager-webhook created mutatingwebhookconfiguration.admissionregistration.k8s.io/cert-manager-webhook created validatingwebhookconfiguration.admissionregistration.k8s.io/cert-manager-webhook created + cat /tmp/tmp.WGOk5E7P3O Warning: resource namespaces/cert-manager is missing the 
kubectl.kubernetes.io/last-applied-configuration annotation which is required by kubectl apply. kubectl apply should only be used on resources created declaratively by either kubectl create --save-config or kubectl apply. The missing annotation will be patched automatically. + rm /tmp/tmp.FWulBmCpWV /tmp/tmp.WGOk5E7P3O + return 0 + kubectl_bin -n cert-manager wait pod -l app.kubernetes.io/instance=cert-manager --for=condition=ready ++ mktemp + local LAST_OUT=/tmp/tmp.SeM9pBHvgB ++ mktemp + local LAST_ERR=/tmp/tmp.lJRb8FwuwX + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl -n cert-manager wait pod -l app.kubernetes.io/instance=cert-manager --for=condition=ready + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.SeM9pBHvgB pod/cert-manager-6687d8765c-s2982 condition met pod/cert-manager-cainjector-764498cfc8-qgfh6 condition met pod/cert-manager-webhook-74c74b87d7-qhcs2 condition met + cat /tmp/tmp.lJRb8FwuwX + rm /tmp/tmp.SeM9pBHvgB /tmp/tmp.lJRb8FwuwX + return 0 + sleep 120 + desc 'install PMM Server' + set +o xtrace ----------------------------------------------------------------------------------- install PMM Server ----------------------------------------------------------------------------------- + deploy_pmm3_server + helm repo remove stable Error: no repo named "stable" found + : + helm repo add stable https://charts.helm.sh/stable "stable" has been added to your repositories + [[ -n '' ]] + helm uninstall monitoring Error: uninstall: Release not loaded: monitoring: release: not found + : + helm repo remove percona Error: no repo named "percona" found + : + kubectl delete clusterrole monitoring --ignore-not-found + kubectl delete clusterrolebinding monitoring --ignore-not-found + helm repo add percona https://percona.github.io/percona-helm-charts/ "percona" has been added to your repositories + helm repo update Hang tight while we grab the latest from your chart repositories... ...Successfully got an update from the "minio" chart repository ...Successfully got an update from the "hashicorp" chart repository ...Successfully got an update from the "percona" chart repository ...Successfully got an update from the "stable" chart repository Update Complete. ⎈Happy Helming!⎈ + retry 10 60 helm install monitoring percona/pmm --set fullnameOverride=monitoring-server --set image.tag=3.1.0 --set image.repository=perconalab/pmm-server --set service.type=LoadBalancer --force + local max=10 + local delay=60 + shift 2 + local n=1 + helm install monitoring percona/pmm --set fullnameOverride=monitoring-server --set image.tag=3.1.0 --set image.repository=perconalab/pmm-server --set service.type=LoadBalancer --force NAME: monitoring LAST DEPLOYED: Fri Jul 4 02:32:16 2025 NAMESPACE: monitoring-pmm3-23926 STATUS: deployed REVISION: 1 TEST SUITE: None NOTES: Percona Monitoring and Management (PMM) An open source database monitoring, observability and management tool Check more info here: https://docs.percona.com/percona-monitoring-and-management/index.html Get the application URL: NOTE: It may take a few minutes for the LoadBalancer IP to be available. 
You can watch the status of by running 'kubectl get --namespace monitoring-pmm3-23926 svc -w monitoring-service' export SERVICE_IP=$(kubectl get svc --namespace monitoring-pmm3-23926 monitoring-service -o jsonpath="{.status.loadBalancer.ingress[0].ip}") echo https://$SERVICE_IP: Get password for the "admin" user: export ADMIN_PASS=$(kubectl get secret pmm-secret --namespace monitoring-pmm3-23926 -o jsonpath='{.data.PMM_ADMIN_PASSWORD}' | base64 --decode) echo $ADMIN_PASS + sleep 20 + kubectl_bin exec monitoring-server-0 -- bash -c 'ls -l /proc/*/exe 2>/dev/null| grep postgres >/dev/null' ++ mktemp + local LAST_OUT=/tmp/tmp.a7xifSp8nV ++ mktemp + local LAST_ERR=/tmp/tmp.NWfjrprCt2 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec monitoring-server-0 -- bash -c 'ls -l /proc/*/exe 2>/dev/null| grep postgres >/dev/null' + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.a7xifSp8nV + cat /tmp/tmp.NWfjrprCt2 error: Internal error occurred: unable to upgrade connection: container not found ("pmm") + sleep 0 + for i in '$(seq 0 2)' + set +e + kubectl exec monitoring-server-0 -- bash -c 'ls -l /proc/*/exe 2>/dev/null| grep postgres >/dev/null' + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.a7xifSp8nV + cat /tmp/tmp.NWfjrprCt2 error: Internal error occurred: unable to upgrade connection: container not found ("pmm") + sleep 4 + for i in '$(seq 0 2)' + set +e + kubectl exec monitoring-server-0 -- bash -c 'ls -l /proc/*/exe 2>/dev/null| grep postgres >/dev/null' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.a7xifSp8nV + cat /tmp/tmp.NWfjrprCt2 + rm /tmp/tmp.a7xifSp8nV /tmp/tmp.NWfjrprCt2 + return 0 + cluster=monitoring-pmm3 + desc 'create secrets and start client' + set +o xtrace ----------------------------------------------------------------------------------- create secrets and start client ----------------------------------------------------------------------------------- + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1993/e2e-tests/conf/secrets.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1993/e2e-tests/monitoring-pmm3/conf/secrets.yml ++ mktemp + local LAST_OUT=/tmp/tmp.7iHGjSuaX1 ++ mktemp + local LAST_ERR=/tmp/tmp.yl1xltzl3a + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1993/e2e-tests/conf/secrets.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1993/e2e-tests/monitoring-pmm3/conf/secrets.yml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.7iHGjSuaX1 secret/some-users created secret/some-users unchanged + cat /tmp/tmp.yl1xltzl3a + rm /tmp/tmp.7iHGjSuaX1 /tmp/tmp.yl1xltzl3a + return 0 + kubectl_bin apply -f - + yq '.spec.template.spec.volumes[0].secret.secretName="monitoring-pmm3-ssl"' /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1993/e2e-tests/conf/client_with_tls.yml ++ mktemp + local LAST_OUT=/tmp/tmp.geHxuNibrP ++ mktemp + local LAST_ERR=/tmp/tmp.CyABcXrNNV + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.geHxuNibrP deployment.apps/psmdb-client created + cat /tmp/tmp.CyABcXrNNV + rm /tmp/tmp.geHxuNibrP /tmp/tmp.CyABcXrNNV + return 0 + sleep 90 + desc 'create first PSMDB cluster monitoring-pmm3' + set +o xtrace 
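The psmdb-client deployment created above comes from the shared client_with_tls.yml manifest, with the TLS volume's secret name rewritten for this cluster before it is applied. Condensed to its essentials, the step visible in the trace amounts to the sketch below (the ${src_dir} variable is taken from earlier in the trace; the wrapping helper code is an assumption):

# mount this cluster's SSL secret into the client pod, then apply the manifest
yq '.spec.template.spec.volumes[0].secret.secretName="monitoring-pmm3-ssl"' \
        "${src_dir}/e2e-tests/conf/client_with_tls.yml" | kubectl apply -f -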
----------------------------------------------------------------------------------- create first PSMDB cluster monitoring-pmm3 ----------------------------------------------------------------------------------- + custom_cluster_name=super-custom + yq eval '.spec.upgradeOptions.apply = "Never"' - + yq eval '(.spec | select(has("backup"))).backup.image = "perconalab/percona-server-mongodb-operator:main-backup"' - + kubectl_bin apply -f - + yq eval '(.spec | select(has("initImage"))).initImage = "perconalab/percona-server-mongodb-operator:PR-1993-9d545650"' - + yq eval '(.spec | select(has("pmm"))).pmm.customClusterName = "super-custom"' - + yq eval '(.spec | select(has("pmm"))).pmm.image = "perconalab/pmm-client:3.1.0"' - + yq eval '(.spec | select(.image == null)).image = "perconalab/percona-server-mongodb-operator:main-mongod7.0"' /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1993/e2e-tests/monitoring-pmm3/conf/monitoring-pmm3-rs0.yml ++ mktemp + local LAST_OUT=/tmp/tmp.5O6mlVJQxS ++ mktemp + local LAST_ERR=/tmp/tmp.7Z3IRYksbF + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.5O6mlVJQxS perconaservermongodb.psmdb.percona.com/monitoring-pmm3 created + cat /tmp/tmp.7Z3IRYksbF + rm /tmp/tmp.5O6mlVJQxS /tmp/tmp.7Z3IRYksbF + return 0 + wait_for_running monitoring-pmm3-rs0 3 + local name=monitoring-pmm3-rs0 + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=monitoring-pmm3 ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + [[ 0 -eq 2 ]] + wait_pod monitoring-pmm3-rs0-0 + local pod=monitoring-pmm3-rs0-0 + set +o xtrace waiting for pod/monitoring-pmm3-rs0-0 to be ready..OK + for i in '$(seq 0 $last_pod)' + [[ 1 -eq 2 ]] + wait_pod monitoring-pmm3-rs0-1 + local pod=monitoring-pmm3-rs0-1 + set +o xtrace waiting for pod/monitoring-pmm3-rs0-1 to be ready..OK + for i in '$(seq 0 $last_pod)' + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb monitoring-pmm3 -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.WwNnD9FhLz +++ mktemp ++ local LAST_ERR=/tmp/tmp.Y7igoqRtF7 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb monitoring-pmm3 -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.WwNnD9FhLz ++ cat /tmp/tmp.Y7igoqRtF7 ++ rm /tmp/tmp.WwNnD9FhLz /tmp/tmp.Y7igoqRtF7 ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod monitoring-pmm3-rs0-2 + local pod=monitoring-pmm3-rs0-2 + set +o xtrace waiting for pod/monitoring-pmm3-rs0-2 to be ready...OK ++ kubectl_bin get psmdb monitoring-pmm3 -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.0szZLbcFRn +++ mktemp ++ local LAST_ERR=/tmp/tmp.o8Utl7JM4G ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb monitoring-pmm3 -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.0szZLbcFRn ++ cat /tmp/tmp.o8Utl7JM4G ++ rm /tmp/tmp.0szZLbcFRn /tmp/tmp.o8Utl7JM4G ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb monitoring-pmm3 -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Bq7h3mKKkx +++ mktemp ++ local 
LAST_ERR=/tmp/tmp.4RGvP8mpyr ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb monitoring-pmm3 -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Bq7h3mKKkx ++ cat /tmp/tmp.4RGvP8mpyr ++ rm /tmp/tmp.Bq7h3mKKkx /tmp/tmp.4RGvP8mpyr ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness.... + desc 'check if pmm-client container is not enabled' + set +o xtrace ----------------------------------------------------------------------------------- check if pmm-client container is not enabled ----------------------------------------------------------------------------------- + compare_kubectl statefulset/monitoring-pmm3-rs0 -no-pmm + local resource=statefulset/monitoring-pmm3-rs0 + local postfix=-no-pmm + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1993/e2e-tests/monitoring-pmm3/compare/statefulset_monitoring-pmm3-rs0-no-pmm.yml + local new_result=/tmp/tmp.NJNkwVxXky/statefulset_monitoring-pmm3-rs0.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1993/e2e-tests/monitoring-pmm3/compare/statefulset_monitoring-pmm3-rs0-no-pmm-oc.yml ']' + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("monitoring-pmm3-23926", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | + kubectl_bin get -o yaml statefulset/monitoring-pmm3-rs0 (.. | select(. 
== "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.7XLHNj0ukQ ++ mktemp + local LAST_ERR=/tmp/tmp.znpC5iNiS8 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get -o yaml statefulset/monitoring-pmm3-rs0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.7XLHNj0ukQ + cat /tmp/tmp.znpC5iNiS8 + rm /tmp/tmp.7XLHNj0ukQ /tmp/tmp.znpC5iNiS8 + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.NJNkwVxXky/statefulset_monitoring-pmm3-rs0.yml + version_gt 1.22 ++ bc -l ++ echo '1.30 >= 1.22' + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.NJNkwVxXky/statefulset_monitoring-pmm3-rs0.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.NJNkwVxXky/statefulset_monitoring-pmm3-rs0.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1993/e2e-tests/monitoring-pmm3/compare/statefulset_monitoring-pmm3-rs0-no-pmm.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1993/e2e-tests/monitoring-pmm3/compare/statefulset_monitoring-pmm3-rs0-no-pmm.yml /tmp/tmp.NJNkwVxXky/statefulset_monitoring-pmm3-rs0.yml + sleep 10 + custom_port=27019 + run_mongos 'db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})' userAdmin:userAdmin123456@monitoring-pmm3-mongos.monitoring-pmm3-23926 '' '' '--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' 27019 + local 'command=db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})' + local uri=userAdmin:userAdmin123456@monitoring-pmm3-mongos.monitoring-pmm3-23926 + local driver=mongodb + local suffix=.svc.cluster.local + local 'mongo_flag=--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' + local port=27019 + local mongo_bin=mongo ++ echo .svc.cluster.local ++ awk -F: '{print $2}' + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27019 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.7bpaINyrYC +++ mktemp ++ local LAST_ERR=/tmp/tmp.cXzVbMyyZ4 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.7bpaINyrYC ++ cat /tmp/tmp.cXzVbMyyZ4 ++ rm /tmp/tmp.7bpaINyrYC /tmp/tmp.cXzVbMyyZ4 ++ return 0 + local client_container=psmdb-client-f8b7b5fb5-g5rlz + kubectl_bin exec psmdb-client-f8b7b5fb5-g5rlz -- bash -c 'printf '\''db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})\n'\'' | mongo mongodb://userAdmin:userAdmin123456@monitoring-pmm3-mongos.monitoring-pmm3-23926.svc.cluster.local:27019/admin --tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' ++ mktemp + local LAST_OUT=/tmp/tmp.pw6Cq7W4B0 ++ mktemp + local LAST_ERR=/tmp/tmp.QkVBdTFYvg + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-f8b7b5fb5-g5rlz -- bash -c 'printf '\''db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})\n'\'' | mongo mongodb://userAdmin:userAdmin123456@monitoring-pmm3-mongos.monitoring-pmm3-23926.svc.cluster.local:27019/admin --tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' 
+ break + cat /tmp/tmp.pw6Cq7W4B0 Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://monitoring-pmm3-mongos.monitoring-pmm3-23926.svc.cluster.local:27019/admin?compressors=disabled&gssapiServiceName=mongodb {"t":{"$date":"2025-07-04T02:39:08.595Z"},"s":"I", "c":"NETWORK", "id":5490002, "ctx":"thread1","msg":"Started a new thread for the timer service"} Implicit session: session { "id" : UUID("4bc2f675-2041-4370-8cc6-ab9f32f7d393") } Percona Server for MongoDB server version: v7.0.18-11 WARNING: shell and server versions do not match Successfully added user: { "user" : "myApp", "roles" : [ { "db" : "myApp", "role" : "readWrite" } ] } bye + cat /tmp/tmp.QkVBdTFYvg + rm /tmp/tmp.pw6Cq7W4B0 /tmp/tmp.QkVBdTFYvg + return 0 + run_mongos 'sh.enableSharding("myApp")' clusterAdmin:clusterAdmin123456@monitoring-pmm3-mongos.monitoring-pmm3-23926 '' '' '--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' 27019 + local 'command=sh.enableSharding("myApp")' + local uri=clusterAdmin:clusterAdmin123456@monitoring-pmm3-mongos.monitoring-pmm3-23926 + local driver=mongodb + local suffix=.svc.cluster.local + local 'mongo_flag=--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' + local port=27019 + local mongo_bin=mongo ++ echo .svc.cluster.local ++ awk -F: '{print $2}' + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27019 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.sFsydH5n4E +++ mktemp ++ local LAST_ERR=/tmp/tmp.vGs3uVTORt ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.sFsydH5n4E ++ cat /tmp/tmp.vGs3uVTORt ++ rm /tmp/tmp.sFsydH5n4E /tmp/tmp.vGs3uVTORt ++ return 0 + local client_container=psmdb-client-f8b7b5fb5-g5rlz + kubectl_bin exec psmdb-client-f8b7b5fb5-g5rlz -- bash -c 'printf '\''sh.enableSharding("myApp")\n'\'' | mongo mongodb://clusterAdmin:clusterAdmin123456@monitoring-pmm3-mongos.monitoring-pmm3-23926.svc.cluster.local:27019/admin --tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' ++ mktemp + local LAST_OUT=/tmp/tmp.5gEtaJ0yRL ++ mktemp + local LAST_ERR=/tmp/tmp.n5LO6iMIqO + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-f8b7b5fb5-g5rlz -- bash -c 'printf '\''sh.enableSharding("myApp")\n'\'' | mongo mongodb://clusterAdmin:clusterAdmin123456@monitoring-pmm3-mongos.monitoring-pmm3-23926.svc.cluster.local:27019/admin --tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.5gEtaJ0yRL Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://monitoring-pmm3-mongos.monitoring-pmm3-23926.svc.cluster.local:27019/admin?compressors=disabled&gssapiServiceName=mongodb {"t":{"$date":"2025-07-04T02:39:22.834Z"},"s":"I", "c":"NETWORK", "id":5490002, "ctx":"thread1","msg":"Started a new thread for the timer service"} Implicit session: session { "id" : UUID("403dea4a-1044-423d-9f32-bf640b762f7d") } Percona Server for MongoDB server version: v7.0.18-11 WARNING: shell and server versions do not match { "ok" : 1, "$clusterTime" : { "clusterTime" : Timestamp(1751596763, 1), "signature" : { "hash" : 
BinData(0,"/uXm0vXdnt0+UuLk9XN3Z3aD+xY="), "keyId" : NumberLong("7523050228748910614") } }, "operationTime" : Timestamp(1751596762, 3) } bye + cat /tmp/tmp.n5LO6iMIqO + rm /tmp/tmp.5gEtaJ0yRL /tmp/tmp.n5LO6iMIqO + return 0 + insert_data_mongos 100500 myApp '--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' 27019 + local data=100500 + local db_name=myApp + local 'flags=--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' + local port=27019 + run_mongos 'use myApp\n db.test.insert({ x: 100500 })' myApp:myPass@monitoring-pmm3-mongos.monitoring-pmm3-23926 '' '' '--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' 27019 + local 'command=use myApp\n db.test.insert({ x: 100500 })' + local uri=myApp:myPass@monitoring-pmm3-mongos.monitoring-pmm3-23926 + local driver=mongodb + local suffix=.svc.cluster.local + local 'mongo_flag=--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' + local port=27019 + local mongo_bin=mongo ++ echo .svc.cluster.local ++ awk -F: '{print $2}' + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27019 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.MjaAo8P8N1 +++ mktemp ++ local LAST_ERR=/tmp/tmp.F2sQRK9jqV ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.MjaAo8P8N1 ++ cat /tmp/tmp.F2sQRK9jqV ++ rm /tmp/tmp.MjaAo8P8N1 /tmp/tmp.F2sQRK9jqV ++ return 0 + local client_container=psmdb-client-f8b7b5fb5-g5rlz + kubectl_bin exec psmdb-client-f8b7b5fb5-g5rlz -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb://myApp:myPass@monitoring-pmm3-mongos.monitoring-pmm3-23926.svc.cluster.local:27019/admin --tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' ++ mktemp + local LAST_OUT=/tmp/tmp.leJ0G6QTIH ++ mktemp + local LAST_ERR=/tmp/tmp.DEvPgIqrrh + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-f8b7b5fb5-g5rlz -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb://myApp:myPass@monitoring-pmm3-mongos.monitoring-pmm3-23926.svc.cluster.local:27019/admin --tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.leJ0G6QTIH Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://monitoring-pmm3-mongos.monitoring-pmm3-23926.svc.cluster.local:27019/admin?compressors=disabled&gssapiServiceName=mongodb {"t":{"$date":"2025-07-04T02:39:34.992Z"},"s":"I", "c":"NETWORK", "id":5490002, "ctx":"thread1","msg":"Started a new thread for the timer service"} Implicit session: session { "id" : UUID("f7649f5a-4cae-4baf-a9dc-c973524dd094") } Percona Server for MongoDB server version: v7.0.18-11 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.DEvPgIqrrh + rm /tmp/tmp.leJ0G6QTIH /tmp/tmp.DEvPgIqrrh + return 0 + insert_data_mongos 100600 myApp '--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' 27019 + local data=100600 + local db_name=myApp + local 'flags=--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' + 
local port=27019 + run_mongos 'use myApp\n db.test.insert({ x: 100600 })' myApp:myPass@monitoring-pmm3-mongos.monitoring-pmm3-23926 '' '' '--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' 27019 + local 'command=use myApp\n db.test.insert({ x: 100600 })' + local uri=myApp:myPass@monitoring-pmm3-mongos.monitoring-pmm3-23926 + local driver=mongodb + local suffix=.svc.cluster.local + local 'mongo_flag=--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' + local port=27019 + local mongo_bin=mongo ++ echo .svc.cluster.local ++ awk -F: '{print $2}' + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27019 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.flyiWVKfEj +++ mktemp ++ local LAST_ERR=/tmp/tmp.t6KgddxjG7 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.flyiWVKfEj ++ cat /tmp/tmp.t6KgddxjG7 ++ rm /tmp/tmp.flyiWVKfEj /tmp/tmp.t6KgddxjG7 ++ return 0 + local client_container=psmdb-client-f8b7b5fb5-g5rlz + kubectl_bin exec psmdb-client-f8b7b5fb5-g5rlz -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100600 })\n'\'' | mongo mongodb://myApp:myPass@monitoring-pmm3-mongos.monitoring-pmm3-23926.svc.cluster.local:27019/admin --tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' ++ mktemp + local LAST_OUT=/tmp/tmp.YGIiIqbDWb ++ mktemp + local LAST_ERR=/tmp/tmp.WLrawlObHQ + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-f8b7b5fb5-g5rlz -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100600 })\n'\'' | mongo mongodb://myApp:myPass@monitoring-pmm3-mongos.monitoring-pmm3-23926.svc.cluster.local:27019/admin --tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.YGIiIqbDWb Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://monitoring-pmm3-mongos.monitoring-pmm3-23926.svc.cluster.local:27019/admin?compressors=disabled&gssapiServiceName=mongodb {"t":{"$date":"2025-07-04T02:39:38.937Z"},"s":"I", "c":"NETWORK", "id":5490002, "ctx":"thread1","msg":"Started a new thread for the timer service"} Implicit session: session { "id" : UUID("e0aab43b-5d5a-41ec-8ac3-27b374eb0e14") } Percona Server for MongoDB server version: v7.0.18-11 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.WLrawlObHQ + rm /tmp/tmp.YGIiIqbDWb /tmp/tmp.WLrawlObHQ + return 0 + insert_data_mongos 100700 myApp '--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' 27019 + local data=100700 + local db_name=myApp + local 'flags=--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' + local port=27019 + run_mongos 'use myApp\n db.test.insert({ x: 100700 })' myApp:myPass@monitoring-pmm3-mongos.monitoring-pmm3-23926 '' '' '--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' 27019 + local 'command=use myApp\n db.test.insert({ x: 100700 })' + local uri=myApp:myPass@monitoring-pmm3-mongos.monitoring-pmm3-23926 + local driver=mongodb + local suffix=.svc.cluster.local + local 'mongo_flag=--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile 
/etc/mongodb-ssl/ca.crt --tls' + local port=27019 + local mongo_bin=mongo ++ awk -F: '{print $2}' ++ echo .svc.cluster.local + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27019 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.4EITWlouop +++ mktemp ++ local LAST_ERR=/tmp/tmp.E2VFUPAsWs ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.4EITWlouop ++ cat /tmp/tmp.E2VFUPAsWs ++ rm /tmp/tmp.4EITWlouop /tmp/tmp.E2VFUPAsWs ++ return 0 + local client_container=psmdb-client-f8b7b5fb5-g5rlz + kubectl_bin exec psmdb-client-f8b7b5fb5-g5rlz -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100700 })\n'\'' | mongo mongodb://myApp:myPass@monitoring-pmm3-mongos.monitoring-pmm3-23926.svc.cluster.local:27019/admin --tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' ++ mktemp + local LAST_OUT=/tmp/tmp.NBH0clGC5Z ++ mktemp + local LAST_ERR=/tmp/tmp.04QD0D7lLM + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-f8b7b5fb5-g5rlz -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100700 })\n'\'' | mongo mongodb://myApp:myPass@monitoring-pmm3-mongos.monitoring-pmm3-23926.svc.cluster.local:27019/admin --tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.NBH0clGC5Z Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://monitoring-pmm3-mongos.monitoring-pmm3-23926.svc.cluster.local:27019/admin?compressors=disabled&gssapiServiceName=mongodb {"t":{"$date":"2025-07-04T02:39:49.951Z"},"s":"I", "c":"NETWORK", "id":5490002, "ctx":"thread1","msg":"Started a new thread for the timer service"} Implicit session: session { "id" : UUID("8e454c51-af9e-4eea-97b3-9b8915c8b902") } Percona Server for MongoDB server version: v7.0.18-11 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.04QD0D7lLM + rm /tmp/tmp.NBH0clGC5Z /tmp/tmp.04QD0D7lLM + return 0 + desc 'add PMM3 token to secret' + set +o xtrace ----------------------------------------------------------------------------------- add PMM3 token to secret ----------------------------------------------------------------------------------- ++ get_pmm_server_token operator ++ local key_name=operator ++ [[ -z operator ]] ++ local ADMIN_PASSWORD +++ kubectl get secret pmm-secret -o 'jsonpath={.data.PMM_ADMIN_PASSWORD}' +++ base64 --decode ++ ADMIN_PASSWORD='(z>.?85g/HixS*]h' ++ [[ -z (z>.?85g/HixS*]h ]] ++ local create_response create_status_code create_json_response ++++ get_service_endpoint monitoring-service ++++ local service=monitoring-service +++++ sed -e 's/^"//; s/"$//;' +++++ jq '.status.loadBalancer.ingress[].hostname' +++++ kubectl_bin get service/monitoring-service -o json ++++++ mktemp +++++ local LAST_OUT=/tmp/tmp.bIckvTi9OD ++++++ mktemp +++++ local LAST_ERR=/tmp/tmp.cJxOkMql3X +++++ local exit_status=0 +++++ local timeout=4 ++++++ seq 0 2 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get service/monitoring-service -o json +++++ exit_status=0 +++++ set -e +++++ '[' 0 '!=' 0 -a -n 1 ']' +++++ break +++++ cat /tmp/tmp.bIckvTi9OD +++++ cat /tmp/tmp.cJxOkMql3X +++++ rm /tmp/tmp.bIckvTi9OD 
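The repeated get_service_endpoint lookups in this section resolve the external address of the monitoring-service LoadBalancer, preferring an ingress hostname and falling back to the ingress IP. A condensed sketch of that logic (the helper in the log pipes through jq and sed; this version uses jq's // empty fallback instead, which is an equivalent shortcut):

    # resolve the PMM endpoint: hostname if the LB publishes one, otherwise the IP
    svc_json=$(kubectl get service/monitoring-service -o json)
    host=$(echo "$svc_json" | jq -r '.status.loadBalancer.ingress[0].hostname // empty')
    ip=$(echo "$svc_json" | jq -r '.status.loadBalancer.ingress[0].ip // empty')
    endpoint=${host:-$ip}
    echo "PMM endpoint: $endpoint"
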
/tmp/tmp.cJxOkMql3X +++++ return 0 ++++ local hostname=null ++++ '[' -n null -a null '!=' null ']' +++++ sed -e 's/^"//; s/"$//;' +++++ jq '.status.loadBalancer.ingress[].ip' +++++ kubectl_bin get service/monitoring-service -o json ++++++ mktemp +++++ local LAST_OUT=/tmp/tmp.tPEBsEIp3K ++++++ mktemp +++++ local LAST_ERR=/tmp/tmp.ZLckIuvPK4 +++++ local exit_status=0 +++++ local timeout=4 ++++++ seq 0 2 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get service/monitoring-service -o json +++++ exit_status=0 +++++ set -e +++++ '[' 0 '!=' 0 -a -n 1 ']' +++++ break +++++ cat /tmp/tmp.tPEBsEIp3K +++++ cat /tmp/tmp.ZLckIuvPK4 +++++ rm /tmp/tmp.tPEBsEIp3K /tmp/tmp.ZLckIuvPK4 +++++ return 0 ++++ local ip=34.10.182.81 ++++ '[' -n 34.10.182.81 -a 34.10.182.81 '!=' null ']' ++++ echo 34.10.182.81 ++++ return +++ curl --insecure -s -X POST -H 'Content-Type: application/json' -H 'Accept: application/json' -d '{"name":"operator", "role":"Admin", "isDisabled":false}' --user 'admin:(z>.?85g/HixS*]h' https://34.10.182.81/graph/api/serviceaccounts -w '\n%{http_code}' ++ create_response='{"id":2,"name":"operator","login":"sa-1-operator","orgId":1,"isDisabled":false,"role":"Admin","tokens":0,"avatarUrl":""} 201' +++ echo '{"id":2,"name":"operator","login":"sa-1-operator","orgId":1,"isDisabled":false,"role":"Admin","tokens":0,"avatarUrl":""} 201' +++ tail -n1 ++ create_status_code=201 +++ sed '$ d' +++ echo '{"id":2,"name":"operator","login":"sa-1-operator","orgId":1,"isDisabled":false,"role":"Admin","tokens":0,"avatarUrl":""} 201' ++ create_json_response='{"id":2,"name":"operator","login":"sa-1-operator","orgId":1,"isDisabled":false,"role":"Admin","tokens":0,"avatarUrl":""}' ++ [[ 201 -ne 201 ]] ++ local service_account_id +++ jq -r .id +++ echo '{"id":2,"name":"operator","login":"sa-1-operator","orgId":1,"isDisabled":false,"role":"Admin","tokens":0,"avatarUrl":""}' ++ service_account_id=2 ++ [[ -z 2 ]] ++ [[ 2 == \n\u\l\l ]] ++ local token_response token_status_code token_json_response ++++ get_service_endpoint monitoring-service ++++ local service=monitoring-service +++++ sed -e 's/^"//; s/"$//;' +++++ kubectl_bin get service/monitoring-service -o json +++++ jq '.status.loadBalancer.ingress[].hostname' ++++++ mktemp +++++ local LAST_OUT=/tmp/tmp.tbjBNqejFM ++++++ mktemp +++++ local LAST_ERR=/tmp/tmp.hlOAbcy5dy +++++ local exit_status=0 +++++ local timeout=4 ++++++ seq 0 2 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get service/monitoring-service -o json +++++ exit_status=0 +++++ set -e +++++ '[' 0 '!=' 0 -a -n 1 ']' +++++ break +++++ cat /tmp/tmp.tbjBNqejFM +++++ cat /tmp/tmp.hlOAbcy5dy +++++ rm /tmp/tmp.tbjBNqejFM /tmp/tmp.hlOAbcy5dy +++++ return 0 ++++ local hostname=null ++++ '[' -n null -a null '!=' null ']' +++++ jq '.status.loadBalancer.ingress[].ip' +++++ sed -e 's/^"//; s/"$//;' +++++ kubectl_bin get service/monitoring-service -o json ++++++ mktemp +++++ local LAST_OUT=/tmp/tmp.Aqgo8x3H3Z ++++++ mktemp +++++ local LAST_ERR=/tmp/tmp.miaOFCdLg0 +++++ local exit_status=0 +++++ local timeout=4 ++++++ seq 0 2 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get service/monitoring-service -o json +++++ exit_status=0 +++++ set -e +++++ '[' 0 '!=' 0 -a -n 1 ']' +++++ break +++++ cat /tmp/tmp.Aqgo8x3H3Z +++++ cat /tmp/tmp.miaOFCdLg0 +++++ rm /tmp/tmp.Aqgo8x3H3Z /tmp/tmp.miaOFCdLg0 +++++ return 0 ++++ local ip=34.10.182.81 ++++ '[' -n 34.10.182.81 -a 34.10.182.81 '!=' null ']' ++++ echo 34.10.182.81 ++++ return +++ curl --insecure -s -X POST -H 'Content-Type: application/json' -d 
'{"name":"operator"}' --user 'admin:(z>.?85g/HixS*]h' https://34.10.182.81/graph/api/serviceaccounts/2/tokens -w '\n%{http_code}' ++ token_response='{"id":1,"name":"operator","key":"glsa_aF87dBHPBVhe0CRpfPIeHwPkRYjHWdT8_5fa0e778"} 200' +++ tail -n1 +++ echo '{"id":1,"name":"operator","key":"glsa_aF87dBHPBVhe0CRpfPIeHwPkRYjHWdT8_5fa0e778"} 200' ++ token_status_code=200 +++ sed '$ d' +++ echo '{"id":1,"name":"operator","key":"glsa_aF87dBHPBVhe0CRpfPIeHwPkRYjHWdT8_5fa0e778"} 200' ++ token_json_response='{"id":1,"name":"operator","key":"glsa_aF87dBHPBVhe0CRpfPIeHwPkRYjHWdT8_5fa0e778"}' ++ [[ 200 -ne 200 ]] ++ jq -r .key ++ echo '{"id":1,"name":"operator","key":"glsa_aF87dBHPBVhe0CRpfPIeHwPkRYjHWdT8_5fa0e778"}' + TOKEN=glsa_aF87dBHPBVhe0CRpfPIeHwPkRYjHWdT8_5fa0e778 + kubectl_bin patch secret some-users --type merge --patch '{"stringData": {"PMM_SERVER_TOKEN": "glsa_aF87dBHPBVhe0CRpfPIeHwPkRYjHWdT8_5fa0e778"}}' ++ mktemp + local LAST_OUT=/tmp/tmp.y9gdkRQsHP ++ mktemp + local LAST_ERR=/tmp/tmp.vQol4MEPFF + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl patch secret some-users --type merge --patch '{"stringData": {"PMM_SERVER_TOKEN": "glsa_aF87dBHPBVhe0CRpfPIeHwPkRYjHWdT8_5fa0e778"}}' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.y9gdkRQsHP secret/some-users patched + cat /tmp/tmp.vQol4MEPFF + rm /tmp/tmp.y9gdkRQsHP /tmp/tmp.vQol4MEPFF + return 0 + desc 'check if all 3 Pods started' + set +o xtrace ----------------------------------------------------------------------------------- check if all 3 Pods started ----------------------------------------------------------------------------------- + wait_for_running monitoring-pmm3-rs0 3 + local name=monitoring-pmm3-rs0 + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=monitoring-pmm3 ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + [[ 0 -eq 2 ]] + wait_pod monitoring-pmm3-rs0-0 + local pod=monitoring-pmm3-rs0-0 + set +o xtrace waiting for pod/monitoring-pmm3-rs0-0 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 1 -eq 2 ]] + wait_pod monitoring-pmm3-rs0-1 + local pod=monitoring-pmm3-rs0-1 + set +o xtrace waiting for pod/monitoring-pmm3-rs0-1 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb monitoring-pmm3 -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.UYoTPF5XBZ +++ mktemp ++ local LAST_ERR=/tmp/tmp.lBa8zhjOSq ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb monitoring-pmm3 -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.UYoTPF5XBZ ++ cat /tmp/tmp.lBa8zhjOSq ++ rm /tmp/tmp.UYoTPF5XBZ /tmp/tmp.lBa8zhjOSq ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod monitoring-pmm3-rs0-2 + local pod=monitoring-pmm3-rs0-2 + set +o xtrace waiting for pod/monitoring-pmm3-rs0-2 to be ready.OK ++ kubectl_bin get psmdb monitoring-pmm3 -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.LosJPVthSs +++ mktemp ++ local LAST_ERR=/tmp/tmp.vbnX1bvPWo ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb monitoring-pmm3 -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat 
/tmp/tmp.LosJPVthSs ++ cat /tmp/tmp.vbnX1bvPWo ++ rm /tmp/tmp.LosJPVthSs /tmp/tmp.vbnX1bvPWo ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb monitoring-pmm3 -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.QXwzdkJcin +++ mktemp ++ local LAST_ERR=/tmp/tmp.W2EcMrtWnM ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb monitoring-pmm3 -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.QXwzdkJcin ++ cat /tmp/tmp.W2EcMrtWnM ++ rm /tmp/tmp.QXwzdkJcin /tmp/tmp.W2EcMrtWnM ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness...................... + sleep 90 + desc 'check if pmm-client container enabled' + set +o xtrace ----------------------------------------------------------------------------------- check if pmm-client container enabled ----------------------------------------------------------------------------------- + compare_kubectl statefulset/monitoring-pmm3-rs0 + local resource=statefulset/monitoring-pmm3-rs0 + local postfix= + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1993/e2e-tests/monitoring-pmm3/compare/statefulset_monitoring-pmm3-rs0.yml + local new_result=/tmp/tmp.NJNkwVxXky/statefulset_monitoring-pmm3-rs0.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1993/e2e-tests/monitoring-pmm3/compare/statefulset_monitoring-pmm3-rs0-oc.yml ']' + kubectl_bin get -o yaml statefulset/monitoring-pmm3-rs0 + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. 
| select(tag == "!!str")) |= sub("monitoring-pmm3-23926", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. == "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.BgcENbqybw ++ mktemp + local LAST_ERR=/tmp/tmp.YuHok5gTuD + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get -o yaml statefulset/monitoring-pmm3-rs0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.BgcENbqybw + cat /tmp/tmp.YuHok5gTuD + rm /tmp/tmp.BgcENbqybw /tmp/tmp.YuHok5gTuD + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.NJNkwVxXky/statefulset_monitoring-pmm3-rs0.yml + version_gt 1.22 ++ bc -l ++ echo '1.30 >= 1.22' + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.NJNkwVxXky/statefulset_monitoring-pmm3-rs0.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.NJNkwVxXky/statefulset_monitoring-pmm3-rs0.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1993/e2e-tests/monitoring-pmm3/compare/statefulset_monitoring-pmm3-rs0.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1993/e2e-tests/monitoring-pmm3/compare/statefulset_monitoring-pmm3-rs0.yml /tmp/tmp.NJNkwVxXky/statefulset_monitoring-pmm3-rs0.yml + compare_kubectl service/monitoring-pmm3-rs0 + local resource=service/monitoring-pmm3-rs0 + local postfix= + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1993/e2e-tests/monitoring-pmm3/compare/service_monitoring-pmm3-rs0.yml + local new_result=/tmp/tmp.NJNkwVxXky/service_monitoring-pmm3-rs0.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1993/e2e-tests/monitoring-pmm3/compare/service_monitoring-pmm3-rs0-oc.yml ']' + kubectl_bin get -o yaml service/monitoring-pmm3-rs0 + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. 
| select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("monitoring-pmm3-23926", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. == "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.noOiYBAW0m ++ mktemp + local LAST_ERR=/tmp/tmp.wMF3iAiW2l + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get -o yaml service/monitoring-pmm3-rs0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.noOiYBAW0m + cat /tmp/tmp.wMF3iAiW2l + rm /tmp/tmp.noOiYBAW0m /tmp/tmp.wMF3iAiW2l + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.NJNkwVxXky/service_monitoring-pmm3-rs0.yml + version_gt 1.22 ++ bc -l ++ echo '1.30 >= 1.22' + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.NJNkwVxXky/service_monitoring-pmm3-rs0.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.NJNkwVxXky/service_monitoring-pmm3-rs0.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1993/e2e-tests/monitoring-pmm3/compare/service_monitoring-pmm3-rs0.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1993/e2e-tests/monitoring-pmm3/compare/service_monitoring-pmm3-rs0.yml /tmp/tmp.NJNkwVxXky/service_monitoring-pmm3-rs0.yml + compare_kubectl service/monitoring-pmm3-mongos + local resource=service/monitoring-pmm3-mongos + local postfix= + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1993/e2e-tests/monitoring-pmm3/compare/service_monitoring-pmm3-mongos.yml + local new_result=/tmp/tmp.NJNkwVxXky/service_monitoring-pmm3-mongos.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1993/e2e-tests/monitoring-pmm3/compare/service_monitoring-pmm3-mongos-oc.yml ']' + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. 
| select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("monitoring-pmm3-23926", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | + kubectl_bin get -o yaml service/monitoring-pmm3-mongos (.. | select(. == "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.9696N6OFVN ++ mktemp + local LAST_ERR=/tmp/tmp.xhFpMihQtF + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get -o yaml service/monitoring-pmm3-mongos + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.9696N6OFVN + cat /tmp/tmp.xhFpMihQtF + rm /tmp/tmp.9696N6OFVN /tmp/tmp.xhFpMihQtF + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.NJNkwVxXky/service_monitoring-pmm3-mongos.yml + version_gt 1.22 ++ bc -l ++ echo '1.30 >= 1.22' + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.NJNkwVxXky/service_monitoring-pmm3-mongos.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.NJNkwVxXky/service_monitoring-pmm3-mongos.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1993/e2e-tests/monitoring-pmm3/compare/service_monitoring-pmm3-mongos.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1993/e2e-tests/monitoring-pmm3/compare/service_monitoring-pmm3-mongos.yml /tmp/tmp.NJNkwVxXky/service_monitoring-pmm3-mongos.yml + compare_kubectl statefulset/monitoring-pmm3-cfg + local resource=statefulset/monitoring-pmm3-cfg + local postfix= + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1993/e2e-tests/monitoring-pmm3/compare/statefulset_monitoring-pmm3-cfg.yml + local new_result=/tmp/tmp.NJNkwVxXky/statefulset_monitoring-pmm3-cfg.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1993/e2e-tests/monitoring-pmm3/compare/statefulset_monitoring-pmm3-cfg-oc.yml ']' + kubectl_bin get -o yaml statefulset/monitoring-pmm3-cfg ++ mktemp + local LAST_OUT=/tmp/tmp.qSCr2u5lmb + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. 
| select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("monitoring-pmm3-23926", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. == "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_ERR=/tmp/tmp.YnZIts4cLJ + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get -o yaml statefulset/monitoring-pmm3-cfg + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.qSCr2u5lmb + cat /tmp/tmp.YnZIts4cLJ + rm /tmp/tmp.qSCr2u5lmb /tmp/tmp.YnZIts4cLJ + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.NJNkwVxXky/statefulset_monitoring-pmm3-cfg.yml + version_gt 1.22 ++ echo '1.30 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.NJNkwVxXky/statefulset_monitoring-pmm3-cfg.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.NJNkwVxXky/statefulset_monitoring-pmm3-cfg.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1993/e2e-tests/monitoring-pmm3/compare/statefulset_monitoring-pmm3-cfg.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1993/e2e-tests/monitoring-pmm3/compare/statefulset_monitoring-pmm3-cfg.yml /tmp/tmp.NJNkwVxXky/statefulset_monitoring-pmm3-cfg.yml + compare_kubectl statefulset/monitoring-pmm3-mongos + local resource=statefulset/monitoring-pmm3-mongos + local postfix= + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1993/e2e-tests/monitoring-pmm3/compare/statefulset_monitoring-pmm3-mongos.yml + local new_result=/tmp/tmp.NJNkwVxXky/statefulset_monitoring-pmm3-mongos.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1993/e2e-tests/monitoring-pmm3/compare/statefulset_monitoring-pmm3-mongos-oc.yml ']' + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. 
| select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("monitoring-pmm3-23926", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. 
== "batch/v1beta1")) = "batch/v1" ' - + kubectl_bin get -o yaml statefulset/monitoring-pmm3-mongos ++ mktemp + local LAST_OUT=/tmp/tmp.TEbqzeAMQZ ++ mktemp + local LAST_ERR=/tmp/tmp.43iGldxPXu + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get -o yaml statefulset/monitoring-pmm3-mongos + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.TEbqzeAMQZ + cat /tmp/tmp.43iGldxPXu + rm /tmp/tmp.TEbqzeAMQZ /tmp/tmp.43iGldxPXu + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.NJNkwVxXky/statefulset_monitoring-pmm3-mongos.yml + version_gt 1.22 ++ echo '1.30 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.NJNkwVxXky/statefulset_monitoring-pmm3-mongos.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.NJNkwVxXky/statefulset_monitoring-pmm3-mongos.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1993/e2e-tests/monitoring-pmm3/compare/statefulset_monitoring-pmm3-mongos.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1993/e2e-tests/monitoring-pmm3/compare/statefulset_monitoring-pmm3-mongos.yml /tmp/tmp.NJNkwVxXky/statefulset_monitoring-pmm3-mongos.yml + desc 'create new PMM token and add it to the secret' + set +o xtrace ----------------------------------------------------------------------------------- create new PMM token and add it to the secret ----------------------------------------------------------------------------------- ++ get_pmm_server_token operator_new ++ local key_name=operator_new ++ [[ -z operator_new ]] ++ local ADMIN_PASSWORD +++ base64 --decode +++ kubectl get secret pmm-secret -o 'jsonpath={.data.PMM_ADMIN_PASSWORD}' ++ ADMIN_PASSWORD='(z>.?85g/HixS*]h' ++ [[ -z (z>.?85g/HixS*]h ]] ++ local create_response create_status_code create_json_response ++++ get_service_endpoint monitoring-service ++++ local service=monitoring-service +++++ sed -e 's/^"//; s/"$//;' +++++ jq '.status.loadBalancer.ingress[].hostname' +++++ kubectl_bin get service/monitoring-service -o json ++++++ mktemp +++++ local LAST_OUT=/tmp/tmp.eFmkPrH9YQ ++++++ mktemp +++++ local LAST_ERR=/tmp/tmp.TV9HWxHOzV +++++ local exit_status=0 +++++ local timeout=4 ++++++ seq 0 2 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get service/monitoring-service -o json +++++ exit_status=0 +++++ set -e +++++ '[' 0 '!=' 0 -a -n 1 ']' +++++ break +++++ cat /tmp/tmp.eFmkPrH9YQ +++++ cat /tmp/tmp.TV9HWxHOzV +++++ rm /tmp/tmp.eFmkPrH9YQ /tmp/tmp.TV9HWxHOzV +++++ return 0 ++++ local hostname=null ++++ '[' -n null -a null '!=' null ']' +++++ sed -e 's/^"//; s/"$//;' +++++ jq '.status.loadBalancer.ingress[].ip' +++++ kubectl_bin get service/monitoring-service -o json ++++++ mktemp +++++ local LAST_OUT=/tmp/tmp.LgubuMRjFW ++++++ mktemp +++++ local LAST_ERR=/tmp/tmp.ExcsEwRsIe +++++ local exit_status=0 +++++ local timeout=4 ++++++ seq 0 2 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get service/monitoring-service -o json +++++ exit_status=0 +++++ set -e +++++ '[' 0 '!=' 0 -a -n 1 ']' +++++ break +++++ cat /tmp/tmp.LgubuMRjFW +++++ cat /tmp/tmp.ExcsEwRsIe +++++ rm /tmp/tmp.LgubuMRjFW /tmp/tmp.ExcsEwRsIe +++++ return 0 ++++ local ip=34.10.182.81 ++++ '[' -n 34.10.182.81 -a 34.10.182.81 '!=' null ']' ++++ echo 34.10.182.81 ++++ return +++ curl --insecure -s -X POST -H 'Content-Type: application/json' -H 'Accept: application/json' -d '{"name":"operator_new", "role":"Admin", 
"isDisabled":false}' --user 'admin:(z>.?85g/HixS*]h' https://34.10.182.81/graph/api/serviceaccounts -w '\n%{http_code}' ++ create_response='{"id":3,"name":"operator_new","login":"sa-1-operator_new","orgId":1,"isDisabled":false,"role":"Admin","tokens":0,"avatarUrl":""} 201' +++ echo '{"id":3,"name":"operator_new","login":"sa-1-operator_new","orgId":1,"isDisabled":false,"role":"Admin","tokens":0,"avatarUrl":""} +++ tail -n1 201' ++ create_status_code=201 +++ echo '{"id":3,"name":"operator_new","login":"sa-1-operator_new","orgId":1,"isDisabled":false,"role":"Admin","tokens":0,"avatarUrl":""} 201' +++ sed '$ d' ++ create_json_response='{"id":3,"name":"operator_new","login":"sa-1-operator_new","orgId":1,"isDisabled":false,"role":"Admin","tokens":0,"avatarUrl":""}' ++ [[ 201 -ne 201 ]] ++ local service_account_id +++ echo '{"id":3,"name":"operator_new","login":"sa-1-operator_new","orgId":1,"isDisabled":false,"role":"Admin","tokens":0,"avatarUrl":""}' +++ jq -r .id ++ service_account_id=3 ++ [[ -z 3 ]] ++ [[ 3 == \n\u\l\l ]] ++ local token_response token_status_code token_json_response ++++ get_service_endpoint monitoring-service ++++ local service=monitoring-service +++++ kubectl_bin get service/monitoring-service -o json +++++ jq '.status.loadBalancer.ingress[].hostname' +++++ sed -e 's/^"//; s/"$//;' ++++++ mktemp +++++ local LAST_OUT=/tmp/tmp.4gPdAfxYHF ++++++ mktemp +++++ local LAST_ERR=/tmp/tmp.EgyFQq8hHp +++++ local exit_status=0 +++++ local timeout=4 ++++++ seq 0 2 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get service/monitoring-service -o json +++++ exit_status=0 +++++ set -e +++++ '[' 0 '!=' 0 -a -n 1 ']' +++++ break +++++ cat /tmp/tmp.4gPdAfxYHF +++++ cat /tmp/tmp.EgyFQq8hHp +++++ rm /tmp/tmp.4gPdAfxYHF /tmp/tmp.EgyFQq8hHp +++++ return 0 ++++ local hostname=null ++++ '[' -n null -a null '!=' null ']' +++++ sed -e 's/^"//; s/"$//;' +++++ jq '.status.loadBalancer.ingress[].ip' +++++ kubectl_bin get service/monitoring-service -o json ++++++ mktemp +++++ local LAST_OUT=/tmp/tmp.iFfIpqoucY ++++++ mktemp +++++ local LAST_ERR=/tmp/tmp.pCAz45c0LB +++++ local exit_status=0 +++++ local timeout=4 ++++++ seq 0 2 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get service/monitoring-service -o json +++++ exit_status=0 +++++ set -e +++++ '[' 0 '!=' 0 -a -n 1 ']' +++++ break +++++ cat /tmp/tmp.iFfIpqoucY +++++ cat /tmp/tmp.pCAz45c0LB +++++ rm /tmp/tmp.iFfIpqoucY /tmp/tmp.pCAz45c0LB +++++ return 0 ++++ local ip=34.10.182.81 ++++ '[' -n 34.10.182.81 -a 34.10.182.81 '!=' null ']' ++++ echo 34.10.182.81 ++++ return +++ curl --insecure -s -X POST -H 'Content-Type: application/json' -d '{"name":"operator_new"}' --user 'admin:(z>.?85g/HixS*]h' https://34.10.182.81/graph/api/serviceaccounts/3/tokens -w '\n%{http_code}' ++ token_response='{"id":2,"name":"operator_new","key":"glsa_x8vxSfP02NB1WGOjPj27sBBnYcevj2hR_ed718512"} 200' +++ tail -n1 +++ echo '{"id":2,"name":"operator_new","key":"glsa_x8vxSfP02NB1WGOjPj27sBBnYcevj2hR_ed718512"} 200' ++ token_status_code=200 +++ sed '$ d' +++ echo '{"id":2,"name":"operator_new","key":"glsa_x8vxSfP02NB1WGOjPj27sBBnYcevj2hR_ed718512"} 200' ++ token_json_response='{"id":2,"name":"operator_new","key":"glsa_x8vxSfP02NB1WGOjPj27sBBnYcevj2hR_ed718512"}' ++ [[ 200 -ne 200 ]] ++ jq -r .key ++ echo '{"id":2,"name":"operator_new","key":"glsa_x8vxSfP02NB1WGOjPj27sBBnYcevj2hR_ed718512"}' + NEW_TOKEN=glsa_x8vxSfP02NB1WGOjPj27sBBnYcevj2hR_ed718512 + kubectl_bin patch secret some-users --type merge --patch '{"stringData": {"PMM_SERVER_TOKEN": 
"glsa_x8vxSfP02NB1WGOjPj27sBBnYcevj2hR_ed718512"}}' ++ mktemp + local LAST_OUT=/tmp/tmp.zKBsorI2uF ++ mktemp + local LAST_ERR=/tmp/tmp.oxcIIoz9Tr + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl patch secret some-users --type merge --patch '{"stringData": {"PMM_SERVER_TOKEN": "glsa_x8vxSfP02NB1WGOjPj27sBBnYcevj2hR_ed718512"}}' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.zKBsorI2uF secret/some-users patched + cat /tmp/tmp.oxcIIoz9Tr + rm /tmp/tmp.zKBsorI2uF /tmp/tmp.oxcIIoz9Tr + return 0 + desc 'delete old PMM token' + set +o xtrace ----------------------------------------------------------------------------------- delete old PMM token ----------------------------------------------------------------------------------- + delete_pmm_server_token operator + local key_name=operator + [[ -z operator ]] + local ADMIN_PASSWORD ++ base64 --decode ++ kubectl get secret pmm-secret -o 'jsonpath={.data.PMM_ADMIN_PASSWORD}' + ADMIN_PASSWORD='(z>.?85g/HixS*]h' + [[ -z (z>.?85g/HixS*]h ]] + local 'user_credentials=admin:(z>.?85g/HixS*]h' + local service_accounts_response service_accounts_status +++ get_service_endpoint monitoring-service +++ local service=monitoring-service ++++ kubectl_bin get service/monitoring-service -o json ++++ sed -e 's/^"//; s/"$//;' ++++ jq '.status.loadBalancer.ingress[].hostname' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.2wzUD7nrAn +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.NgzRKIB9VT ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ set +e ++++ kubectl get service/monitoring-service -o json ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.2wzUD7nrAn ++++ cat /tmp/tmp.NgzRKIB9VT ++++ rm /tmp/tmp.2wzUD7nrAn /tmp/tmp.NgzRKIB9VT ++++ return 0 +++ local hostname=null +++ '[' -n null -a null '!=' null ']' ++++ kubectl_bin get service/monitoring-service -o json ++++ jq '.status.loadBalancer.ingress[].ip' ++++ sed -e 's/^"//; s/"$//;' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.wDXv9kHwpW +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.1D8gixKHdO ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ set +e ++++ kubectl get service/monitoring-service -o json ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.wDXv9kHwpW ++++ cat /tmp/tmp.1D8gixKHdO ++++ rm /tmp/tmp.wDXv9kHwpW /tmp/tmp.1D8gixKHdO ++++ return 0 +++ local ip=34.10.182.81 +++ '[' -n 34.10.182.81 -a 34.10.182.81 '!=' null ']' +++ echo 34.10.182.81 +++ return ++ curl --insecure -s -X GET --user 'admin:(z>.?85g/HixS*]h' https://34.10.182.81/graph/api/serviceaccounts/search -w '\n%{http_code}' + service_accounts_response='{"totalCount":2,"serviceAccounts":[{"id":2,"name":"operator","login":"sa-1-operator","orgId":1,"isDisabled":false,"role":"Admin","tokens":1,"avatarUrl":"/graph/avatar/f741e7abdd8def4ed30a37f602429df3"},{"id":3,"name":"operator_new","login":"sa-1-operator_new","orgId":1,"isDisabled":false,"role":"Admin","tokens":1,"avatarUrl":"/graph/avatar/6d1b2db67a8f3e06db4ae076406fece5"}],"page":1,"perPage":1000} 200' ++ tail -n1 ++ echo 
'{"totalCount":2,"serviceAccounts":[{"id":2,"name":"operator","login":"sa-1-operator","orgId":1,"isDisabled":false,"role":"Admin","tokens":1,"avatarUrl":"/graph/avatar/f741e7abdd8def4ed30a37f602429df3"},{"id":3,"name":"operator_new","login":"sa-1-operator_new","orgId":1,"isDisabled":false,"role":"Admin","tokens":1,"avatarUrl":"/graph/avatar/6d1b2db67a8f3e06db4ae076406fece5"}],"page":1,"perPage":1000} 200' + service_accounts_status=200 ++ sed '$ d' ++ echo '{"totalCount":2,"serviceAccounts":[{"id":2,"name":"operator","login":"sa-1-operator","orgId":1,"isDisabled":false,"role":"Admin","tokens":1,"avatarUrl":"/graph/avatar/f741e7abdd8def4ed30a37f602429df3"},{"id":3,"name":"operator_new","login":"sa-1-operator_new","orgId":1,"isDisabled":false,"role":"Admin","tokens":1,"avatarUrl":"/graph/avatar/6d1b2db67a8f3e06db4ae076406fece5"}],"page":1,"perPage":1000} 200' + service_accounts_json='{"totalCount":2,"serviceAccounts":[{"id":2,"name":"operator","login":"sa-1-operator","orgId":1,"isDisabled":false,"role":"Admin","tokens":1,"avatarUrl":"/graph/avatar/f741e7abdd8def4ed30a37f602429df3"},{"id":3,"name":"operator_new","login":"sa-1-operator_new","orgId":1,"isDisabled":false,"role":"Admin","tokens":1,"avatarUrl":"/graph/avatar/6d1b2db67a8f3e06db4ae076406fece5"}],"page":1,"perPage":1000}' + [[ 200 -ne 200 ]] + local service_account_id ++ jq -r '.serviceAccounts[] | select(.name == "operator").id' ++ echo '{"totalCount":2,"serviceAccounts":[{"id":2,"name":"operator","login":"sa-1-operator","orgId":1,"isDisabled":false,"role":"Admin","tokens":1,"avatarUrl":"/graph/avatar/f741e7abdd8def4ed30a37f602429df3"},{"id":3,"name":"operator_new","login":"sa-1-operator_new","orgId":1,"isDisabled":false,"role":"Admin","tokens":1,"avatarUrl":"/graph/avatar/6d1b2db67a8f3e06db4ae076406fece5"}],"page":1,"perPage":1000}' + service_account_id=2 + [[ -z 2 ]] + [[ 2 == \n\u\l\l ]] + local tokens_response tokens_status tokens_json +++ get_service_endpoint monitoring-service +++ local service=monitoring-service ++++ kubectl_bin get service/monitoring-service -o json ++++ sed -e 's/^"//; s/"$//;' ++++ jq '.status.loadBalancer.ingress[].hostname' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.7Yy19zXBeI +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.Im276Tvt3M ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ set +e ++++ kubectl get service/monitoring-service -o json ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.7Yy19zXBeI ++++ cat /tmp/tmp.Im276Tvt3M ++++ rm /tmp/tmp.7Yy19zXBeI /tmp/tmp.Im276Tvt3M ++++ return 0 +++ local hostname=null +++ '[' -n null -a null '!=' null ']' ++++ sed -e 's/^"//; s/"$//;' ++++ jq '.status.loadBalancer.ingress[].ip' ++++ kubectl_bin get service/monitoring-service -o json +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.JYegDOy4Uu +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.FdoMuqvyPN ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ set +e ++++ kubectl get service/monitoring-service -o json ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.JYegDOy4Uu ++++ cat /tmp/tmp.FdoMuqvyPN ++++ rm /tmp/tmp.JYegDOy4Uu /tmp/tmp.FdoMuqvyPN ++++ return 0 +++ local ip=34.10.182.81 +++ '[' -n 34.10.182.81 -a 34.10.182.81 '!=' null ']' +++ echo 34.10.182.81 +++ return ++ curl --insecure -s -X GET --user 'admin:(z>.?85g/HixS*]h' https://34.10.182.81/graph/api/serviceaccounts/2/tokens -w '\n%{http_code}' + 
tokens_response='[{"id":1,"name":"operator","created":"2025-07-04T02:40:25Z","lastUsedAt":"2025-07-04T02:51:07Z","expiration":null,"secondsUntilExpiration":0,"hasExpired":false,"isRevoked":false}] 200' ++ tail -n1 ++ echo '[{"id":1,"name":"operator","created":"2025-07-04T02:40:25Z","lastUsedAt":"2025-07-04T02:51:07Z","expiration":null,"secondsUntilExpiration":0,"hasExpired":false,"isRevoked":false}] 200' + tokens_status=200 ++ echo '[{"id":1,"name":"operator","created":"2025-07-04T02:40:25Z","lastUsedAt":"2025-07-04T02:51:07Z","expiration":null,"secondsUntilExpiration":0,"hasExpired":false,"isRevoked":false}] 200' ++ sed '$ d' + tokens_json='[{"id":1,"name":"operator","created":"2025-07-04T02:40:25Z","lastUsedAt":"2025-07-04T02:51:07Z","expiration":null,"secondsUntilExpiration":0,"hasExpired":false,"isRevoked":false}]' + [[ 200 -ne 200 ]] + local token_id ++ echo '[{"id":1,"name":"operator","created":"2025-07-04T02:40:25Z","lastUsedAt":"2025-07-04T02:51:07Z","expiration":null,"secondsUntilExpiration":0,"hasExpired":false,"isRevoked":false}]' ++ jq -r '.[] | select(.name == "operator").id' + token_id=1 + [[ -z 1 ]] + [[ 1 == \n\u\l\l ]] + local delete_response delete_status +++ get_service_endpoint monitoring-service +++ local service=monitoring-service ++++ sed -e 's/^"//; s/"$//;' ++++ jq '.status.loadBalancer.ingress[].hostname' ++++ kubectl_bin get service/monitoring-service -o json +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.mnhfcVlj9H +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.qmlCb00FGO ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ set +e ++++ kubectl get service/monitoring-service -o json ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.mnhfcVlj9H ++++ cat /tmp/tmp.qmlCb00FGO ++++ rm /tmp/tmp.mnhfcVlj9H /tmp/tmp.qmlCb00FGO ++++ return 0 +++ local hostname=null +++ '[' -n null -a null '!=' null ']' ++++ sed -e 's/^"//; s/"$//;' ++++ jq '.status.loadBalancer.ingress[].ip' ++++ kubectl_bin get service/monitoring-service -o json +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.OqUA5oveWR +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.U2NjNHehKS ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ set +e ++++ kubectl get service/monitoring-service -o json ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.OqUA5oveWR ++++ cat /tmp/tmp.U2NjNHehKS ++++ rm /tmp/tmp.OqUA5oveWR /tmp/tmp.U2NjNHehKS ++++ return 0 +++ local ip=34.10.182.81 +++ '[' -n 34.10.182.81 -a 34.10.182.81 '!=' null ']' +++ echo 34.10.182.81 +++ return ++ curl --insecure -s -X DELETE --user 'admin:(z>.?85g/HixS*]h' https://34.10.182.81/graph/api/serviceaccounts/2/tokens/1 -w '\n%{http_code}' + delete_response='{"message":"Service account token deleted"} 200' ++ echo '{"message":"Service account token deleted"} ++ tail -n1 200' + delete_status=200 + [[ 200 -ne 200 ]] + desc 'check mongod metrics' + set +o xtrace ----------------------------------------------------------------------------------- check mongod metrics ----------------------------------------------------------------------------------- + get_metric_values node_boot_time_seconds monitoring-pmm3-23926-monitoring-pmm3-rs0-1 glsa_x8vxSfP02NB1WGOjPj27sBBnYcevj2hR_ed718512 + local metric=node_boot_time_seconds + local instance=monitoring-pmm3-23926-monitoring-pmm3-rs0-1 + local token=glsa_x8vxSfP02NB1WGOjPj27sBBnYcevj2hR_ed718512 ++ /usr/bin/date -u +%s -d '-1 minute' + local start=1751597427 ++ /usr/bin/date -u +%s + 
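delete_pmm_server_token, traced above, walks the same Grafana service-account API the other way: look the account up by name, list its tokens, and delete the one matching the key name. A condensed sketch with the endpoints seen in this run (ENDPOINT and ADMIN_PASSWORD are the same placeholders as in the earlier sketch):

    # remove the token named "operator" from its service account
    auth=(--user "admin:${ADMIN_PASSWORD}")
    sa_id=$(curl -sk "${auth[@]}" "https://${ENDPOINT}/graph/api/serviceaccounts/search" \
        | jq -r '.serviceAccounts[] | select(.name == "operator").id')
    token_id=$(curl -sk "${auth[@]}" "https://${ENDPOINT}/graph/api/serviceaccounts/${sa_id}/tokens" \
        | jq -r '.[] | select(.name == "operator").id')
    curl -sk -X DELETE "${auth[@]}" \
        "https://${ENDPOINT}/graph/api/serviceaccounts/${sa_id}/tokens/${token_id}"
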
local end=1751597487 ++ get_service_endpoint monitoring-service ++ local service=monitoring-service +++ sed -e 's/^"//; s/"$//;' +++ kubectl_bin get service/monitoring-service -o json +++ jq '.status.loadBalancer.ingress[].hostname' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.NpyNFaICzm ++++ mktemp +++ local LAST_ERR=/tmp/tmp.09Vogifdko +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.NpyNFaICzm +++ cat /tmp/tmp.09Vogifdko +++ rm /tmp/tmp.NpyNFaICzm /tmp/tmp.09Vogifdko +++ return 0 ++ local hostname=null ++ '[' -n null -a null '!=' null ']' +++ sed -e 's/^"//; s/"$//;' +++ jq '.status.loadBalancer.ingress[].ip' +++ kubectl_bin get service/monitoring-service -o json ++++ mktemp +++ local LAST_OUT=/tmp/tmp.2XbJ8kOua4 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.7eTf1Qflrc +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.2XbJ8kOua4 +++ cat /tmp/tmp.7eTf1Qflrc +++ rm /tmp/tmp.2XbJ8kOua4 /tmp/tmp.7eTf1Qflrc +++ return 0 ++ local ip=34.10.182.81 ++ '[' -n 34.10.182.81 -a 34.10.182.81 '!=' null ']' ++ echo 34.10.182.81 ++ return + local endpoint=34.10.182.81 + '[' -z node_boot_time_seconds ']' + '[' -z glsa_x8vxSfP02NB1WGOjPj27sBBnYcevj2hR_ed718512 ']' + local wait_count=30 + local retry=0 ++ grep '^"[0-9]' ++ curl -s -k -H 'Authorization: Bearer glsa_x8vxSfP02NB1WGOjPj27sBBnYcevj2hR_ed718512' 'https://34.10.182.81/graph/api/datasources/proxy/1/api/v1/query_range?query=min%28node_boot_time_seconds%7Bnode_name%3D%7E%22monitoring-pmm3-23926-monitoring-pmm3-rs0-1%22%7d%20or%20node_boot_time_seconds%7Bnode_name%3D%7E%22monitoring-pmm3-23926-monitoring-pmm3-rs0-1%22%7D%29&start=1751597427&end=1751597487&step=60' ++ jq '.data.result[0].values[][1]' + [[ -n "1751588405" "1751588405" ]] + get_metric_values mongodb_connections monitoring-pmm3-23926-monitoring-pmm3-rs0-1 glsa_x8vxSfP02NB1WGOjPj27sBBnYcevj2hR_ed718512 + local metric=mongodb_connections + local instance=monitoring-pmm3-23926-monitoring-pmm3-rs0-1 + local token=glsa_x8vxSfP02NB1WGOjPj27sBBnYcevj2hR_ed718512 ++ /usr/bin/date -u +%s -d '-1 minute' + local start=1751597462 ++ /usr/bin/date -u +%s + local end=1751597522 ++ get_service_endpoint monitoring-service ++ local service=monitoring-service +++ jq '.status.loadBalancer.ingress[].hostname' +++ kubectl_bin get service/monitoring-service -o json +++ sed -e 's/^"//; s/"$//;' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.EVZfsnEaWe ++++ mktemp +++ local LAST_ERR=/tmp/tmp.WP8FKQbmW6 +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.EVZfsnEaWe +++ cat /tmp/tmp.WP8FKQbmW6 +++ rm /tmp/tmp.EVZfsnEaWe /tmp/tmp.WP8FKQbmW6 +++ return 0 ++ local hostname=null ++ '[' -n null -a null '!=' null ']' +++ sed -e 's/^"//; s/"$//;' +++ kubectl_bin get service/monitoring-service -o json +++ jq '.status.loadBalancer.ingress[].ip' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.wZRnlrn1oU ++++ mktemp +++ local LAST_ERR=/tmp/tmp.D92rzWDft5 +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ 
set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.wZRnlrn1oU +++ cat /tmp/tmp.D92rzWDft5 +++ rm /tmp/tmp.wZRnlrn1oU /tmp/tmp.D92rzWDft5 +++ return 0 ++ local ip=34.10.182.81 ++ '[' -n 34.10.182.81 -a 34.10.182.81 '!=' null ']' ++ echo 34.10.182.81 ++ return + local endpoint=34.10.182.81 + '[' -z mongodb_connections ']' + '[' -z glsa_x8vxSfP02NB1WGOjPj27sBBnYcevj2hR_ed718512 ']' + local wait_count=30 + local retry=0 ++ grep '^"[0-9]' ++ curl -s -k -H 'Authorization: Bearer glsa_x8vxSfP02NB1WGOjPj27sBBnYcevj2hR_ed718512' 'https://34.10.182.81/graph/api/datasources/proxy/1/api/v1/query_range?query=min%28mongodb_connections%7Bnode_name%3D%7E%22monitoring-pmm3-23926-monitoring-pmm3-rs0-1%22%7d%20or%20mongodb_connections%7Bnode_name%3D%7E%22monitoring-pmm3-23926-monitoring-pmm3-rs0-1%22%7D%29&start=1751597462&end=1751597522&step=60' ++ jq '.data.result[0].values[][1]' + [[ -n "0" "0" ]] + desc 'check mongo config metrics' + set +o xtrace ----------------------------------------------------------------------------------- check mongo config metrics ----------------------------------------------------------------------------------- + get_metric_values node_boot_time_seconds monitoring-pmm3-23926-monitoring-pmm3-cfg-1 glsa_x8vxSfP02NB1WGOjPj27sBBnYcevj2hR_ed718512 + local metric=node_boot_time_seconds + local instance=monitoring-pmm3-23926-monitoring-pmm3-cfg-1 + local token=glsa_x8vxSfP02NB1WGOjPj27sBBnYcevj2hR_ed718512 ++ /usr/bin/date -u +%s -d '-1 minute' + local start=1751597479 ++ /usr/bin/date -u +%s + local end=1751597539 ++ get_service_endpoint monitoring-service ++ local service=monitoring-service +++ sed -e 's/^"//; s/"$//;' +++ kubectl_bin get service/monitoring-service -o json +++ jq '.status.loadBalancer.ingress[].hostname' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.RH4bvL5dIQ ++++ mktemp +++ local LAST_ERR=/tmp/tmp.42oBvU8V2d +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.RH4bvL5dIQ +++ cat /tmp/tmp.42oBvU8V2d +++ rm /tmp/tmp.RH4bvL5dIQ /tmp/tmp.42oBvU8V2d +++ return 0 ++ local hostname=null ++ '[' -n null -a null '!=' null ']' +++ jq '.status.loadBalancer.ingress[].ip' +++ sed -e 's/^"//; s/"$//;' +++ kubectl_bin get service/monitoring-service -o json ++++ mktemp +++ local LAST_OUT=/tmp/tmp.9Klvx8idx2 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.cW1DW6t5el +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.9Klvx8idx2 +++ cat /tmp/tmp.cW1DW6t5el +++ rm /tmp/tmp.9Klvx8idx2 /tmp/tmp.cW1DW6t5el +++ return 0 ++ local ip=34.10.182.81 ++ '[' -n 34.10.182.81 -a 34.10.182.81 '!=' null ']' ++ echo 34.10.182.81 ++ return + local endpoint=34.10.182.81 + '[' -z node_boot_time_seconds ']' + '[' -z glsa_x8vxSfP02NB1WGOjPj27sBBnYcevj2hR_ed718512 ']' + local wait_count=30 + local retry=0 ++ jq '.data.result[0].values[][1]' ++ grep '^"[0-9]' ++ curl -s -k -H 'Authorization: Bearer glsa_x8vxSfP02NB1WGOjPj27sBBnYcevj2hR_ed718512' 'https://34.10.182.81/graph/api/datasources/proxy/1/api/v1/query_range?query=min%28node_boot_time_seconds%7Bnode_name%3D%7E%22monitoring-pmm3-23926-monitoring-pmm3-cfg-1%22%7d%20or%20node_boot_time_seconds%7Bnode_name%3D%7E%22monitoring-pmm3-23926-monitoring-pmm3-cfg-1%22%7D%29&start=1751597479&end=1751597539&step=60' 
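The metric assertions in this part hit PMM's Grafana datasource proxy with the newly minted token and only require a non-empty numeric series for the last minute. A hedged sketch of one such check (TOKEN and ENDPOINT as before); the log pre-encodes the URL and duplicates the selector with an "or", while this version uses curl's --data-urlencode and a simplified min(...) query:

    # ask PMM for node_boot_time_seconds of one instance over the last minute
    instance=monitoring-pmm3-23926-monitoring-pmm3-rs0-1
    start=$(date -u +%s -d '-1 minute')
    end=$(date -u +%s)
    query="min(node_boot_time_seconds{node_name=~\"${instance}\"})"
    curl -sk -H "Authorization: Bearer ${TOKEN}" \
        --data-urlencode "query=${query}" \
        --data-urlencode "start=${start}" \
        --data-urlencode "end=${end}" \
        --data-urlencode "step=60" \
        -G "https://${ENDPOINT}/graph/api/datasources/proxy/1/api/v1/query_range" \
      | jq '.data.result[0].values[][1]'
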
+ [[ -n "1751588411" "1751588411" ]] + get_metric_values mongodb_connections monitoring-pmm3-23926-monitoring-pmm3-cfg-1 glsa_x8vxSfP02NB1WGOjPj27sBBnYcevj2hR_ed718512 + local metric=mongodb_connections + local instance=monitoring-pmm3-23926-monitoring-pmm3-cfg-1 + local token=glsa_x8vxSfP02NB1WGOjPj27sBBnYcevj2hR_ed718512 ++ /usr/bin/date -u +%s -d '-1 minute' + local start=1751597497 ++ /usr/bin/date -u +%s + local end=1751597557 ++ get_service_endpoint monitoring-service ++ local service=monitoring-service +++ sed -e 's/^"//; s/"$//;' +++ jq '.status.loadBalancer.ingress[].hostname' +++ kubectl_bin get service/monitoring-service -o json ++++ mktemp +++ local LAST_OUT=/tmp/tmp.OUj1FetduF ++++ mktemp +++ local LAST_ERR=/tmp/tmp.gx6JTiM45U +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.OUj1FetduF +++ cat /tmp/tmp.gx6JTiM45U +++ rm /tmp/tmp.OUj1FetduF /tmp/tmp.gx6JTiM45U +++ return 0 ++ local hostname=null ++ '[' -n null -a null '!=' null ']' +++ sed -e 's/^"//; s/"$//;' +++ jq '.status.loadBalancer.ingress[].ip' +++ kubectl_bin get service/monitoring-service -o json ++++ mktemp +++ local LAST_OUT=/tmp/tmp.t0657sEZ5W ++++ mktemp +++ local LAST_ERR=/tmp/tmp.atwppTCzv8 +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.t0657sEZ5W +++ cat /tmp/tmp.atwppTCzv8 +++ rm /tmp/tmp.t0657sEZ5W /tmp/tmp.atwppTCzv8 +++ return 0 ++ local ip=34.10.182.81 ++ '[' -n 34.10.182.81 -a 34.10.182.81 '!=' null ']' ++ echo 34.10.182.81 ++ return + local endpoint=34.10.182.81 + '[' -z mongodb_connections ']' + '[' -z glsa_x8vxSfP02NB1WGOjPj27sBBnYcevj2hR_ed718512 ']' + local wait_count=30 + local retry=0 ++ grep '^"[0-9]' ++ curl -s -k -H 'Authorization: Bearer glsa_x8vxSfP02NB1WGOjPj27sBBnYcevj2hR_ed718512' 'https://34.10.182.81/graph/api/datasources/proxy/1/api/v1/query_range?query=min%28mongodb_connections%7Bnode_name%3D%7E%22monitoring-pmm3-23926-monitoring-pmm3-cfg-1%22%7d%20or%20mongodb_connections%7Bnode_name%3D%7E%22monitoring-pmm3-23926-monitoring-pmm3-cfg-1%22%7D%29&start=1751597497&end=1751597557&step=60' ++ jq '.data.result[0].values[][1]' + [[ -n "0" ]] + desc 'check mongos metrics' + set +o xtrace ----------------------------------------------------------------------------------- check mongos metrics ----------------------------------------------------------------------------------- ++ kubectl get pod -l app.kubernetes.io/component=mongos -o 'jsonpath={.items[0].metadata.name}' + MONGOS_POD_NAME=monitoring-pmm3-mongos-0 + get_metric_values node_boot_time_seconds monitoring-pmm3-23926-monitoring-pmm3-mongos-0 glsa_x8vxSfP02NB1WGOjPj27sBBnYcevj2hR_ed718512 + local metric=node_boot_time_seconds + local instance=monitoring-pmm3-23926-monitoring-pmm3-mongos-0 + local token=glsa_x8vxSfP02NB1WGOjPj27sBBnYcevj2hR_ed718512 ++ /usr/bin/date -u +%s -d '-1 minute' + local start=1751597542 ++ /usr/bin/date -u +%s + local end=1751597602 ++ get_service_endpoint monitoring-service ++ local service=monitoring-service +++ sed -e 's/^"//; s/"$//;' +++ jq '.status.loadBalancer.ingress[].hostname' +++ kubectl_bin get service/monitoring-service -o json ++++ mktemp +++ local LAST_OUT=/tmp/tmp.C1ZvoYrveH ++++ mktemp +++ local LAST_ERR=/tmp/tmp.aHm8M7LLF7 +++ local 
exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.C1ZvoYrveH +++ cat /tmp/tmp.aHm8M7LLF7 +++ rm /tmp/tmp.C1ZvoYrveH /tmp/tmp.aHm8M7LLF7 +++ return 0 ++ local hostname=null ++ '[' -n null -a null '!=' null ']' +++ kubectl_bin get service/monitoring-service -o json ++++ mktemp +++ jq '.status.loadBalancer.ingress[].ip' +++ sed -e 's/^"//; s/"$//;' +++ local LAST_OUT=/tmp/tmp.8l6jh3o5EU ++++ mktemp +++ local LAST_ERR=/tmp/tmp.5fep0YX63a +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.8l6jh3o5EU +++ cat /tmp/tmp.5fep0YX63a +++ rm /tmp/tmp.8l6jh3o5EU /tmp/tmp.5fep0YX63a +++ return 0 ++ local ip=34.10.182.81 ++ '[' -n 34.10.182.81 -a 34.10.182.81 '!=' null ']' ++ echo 34.10.182.81 ++ return + local endpoint=34.10.182.81 + '[' -z node_boot_time_seconds ']' + '[' -z glsa_x8vxSfP02NB1WGOjPj27sBBnYcevj2hR_ed718512 ']' + local wait_count=30 + local retry=0 ++ curl -s -k -H 'Authorization: Bearer glsa_x8vxSfP02NB1WGOjPj27sBBnYcevj2hR_ed718512' 'https://34.10.182.81/graph/api/datasources/proxy/1/api/v1/query_range?query=min%28node_boot_time_seconds%7Bnode_name%3D%7E%22monitoring-pmm3-23926-monitoring-pmm3-mongos-0%22%7d%20or%20node_boot_time_seconds%7Bnode_name%3D%7E%22monitoring-pmm3-23926-monitoring-pmm3-mongos-0%22%7D%29&start=1751597542&end=1751597602&step=60' ++ grep '^"[0-9]' ++ jq '.data.result[0].values[][1]' + [[ -n "1751588405" ]] + sleep 90 + desc 'check QAN data' + set +o xtrace ----------------------------------------------------------------------------------- check QAN data ----------------------------------------------------------------------------------- + get_qan_values mongodb dev-mongod glsa_x8vxSfP02NB1WGOjPj27sBBnYcevj2hR_ed718512 + local service_type=mongodb + local environment=dev-mongod + local token=glsa_x8vxSfP02NB1WGOjPj27sBBnYcevj2hR_ed718512 + local start + local end + local endpoint ++ /usr/bin/date -u -d '-12 hour' +%Y-%m-%dT%H:%M:%S%:z + start=2025-07-03T14:55:33+00:00 ++ /usr/bin/date -u +%Y-%m-%dT%H:%M:%S%:z + end=2025-07-04T02:55:33+00:00 ++ get_service_endpoint monitoring-service ++ local service=monitoring-service +++ sed -e 's/^"//; s/"$//;' +++ kubectl_bin get service/monitoring-service -o json ++++ mktemp +++ jq '.status.loadBalancer.ingress[].hostname' +++ local LAST_OUT=/tmp/tmp.2vXyXOOKt5 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.LGd0w9ADrK +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.2vXyXOOKt5 +++ cat /tmp/tmp.LGd0w9ADrK +++ rm /tmp/tmp.2vXyXOOKt5 /tmp/tmp.LGd0w9ADrK +++ return 0 ++ local hostname=null ++ '[' -n null -a null '!=' null ']' +++ kubectl_bin get service/monitoring-service -o json ++++ mktemp +++ local LAST_OUT=/tmp/tmp.w54oVGdUC9 +++ jq '.status.loadBalancer.ingress[].ip' ++++ mktemp +++ sed -e 's/^"//; s/"$//;' +++ local LAST_ERR=/tmp/tmp.nGTBFgoU2l +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.w54oVGdUC9 +++ cat /tmp/tmp.nGTBFgoU2l +++ rm 
/tmp/tmp.w54oVGdUC9 /tmp/tmp.nGTBFgoU2l +++ return 0 ++ local ip=34.10.182.81 ++ '[' -n 34.10.182.81 -a 34.10.182.81 '!=' null ']' ++ echo 34.10.182.81 ++ return + endpoint=34.10.182.81 + cat + local response + local wait_count=30 + local retry=0 ++ jq '.rows[].fingerprint' ++ curl -s -k -H 'Authorization: Bearer glsa_x8vxSfP02NB1WGOjPj27sBBnYcevj2hR_ed718512' -XPOST -d @payload.json https://34.10.182.81/v1/qan/metrics:getReport + [[ -n "TOTAL" "db.version.find({}).limit(?)" "db.oplog.rs.find({}).sort({\"$natural\":1}).limit(?)" "db.oplog.rs.find({}).sort({\"$natural\":-1}).limit(?)" ]] + rm -f payload.json + get_qan_values mongodb dev-mongos glsa_x8vxSfP02NB1WGOjPj27sBBnYcevj2hR_ed718512 + local service_type=mongodb + local environment=dev-mongos + local token=glsa_x8vxSfP02NB1WGOjPj27sBBnYcevj2hR_ed718512 + local start + local end + local endpoint ++ /usr/bin/date -u -d '-12 hour' +%Y-%m-%dT%H:%M:%S%:z + start=2025-07-03T14:55:55+00:00 ++ /usr/bin/date -u +%Y-%m-%dT%H:%M:%S%:z + end=2025-07-04T02:55:55+00:00 ++ get_service_endpoint monitoring-service ++ local service=monitoring-service +++ sed -e 's/^"//; s/"$//;' +++ kubectl_bin get service/monitoring-service -o json +++ jq '.status.loadBalancer.ingress[].hostname' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.eowr07gxZ8 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.btf4bwd7hV +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.eowr07gxZ8 +++ cat /tmp/tmp.btf4bwd7hV +++ rm /tmp/tmp.eowr07gxZ8 /tmp/tmp.btf4bwd7hV +++ return 0 ++ local hostname=null ++ '[' -n null -a null '!=' null ']' +++ sed -e 's/^"//; s/"$//;' +++ jq '.status.loadBalancer.ingress[].ip' +++ kubectl_bin get service/monitoring-service -o json ++++ mktemp +++ local LAST_OUT=/tmp/tmp.UDsvz8DzQi ++++ mktemp +++ local LAST_ERR=/tmp/tmp.eO8bFsNnxN +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.UDsvz8DzQi +++ cat /tmp/tmp.eO8bFsNnxN +++ rm /tmp/tmp.UDsvz8DzQi /tmp/tmp.eO8bFsNnxN +++ return 0 ++ local ip=34.10.182.81 ++ '[' -n 34.10.182.81 -a 34.10.182.81 '!=' null ']' ++ echo 34.10.182.81 ++ return + endpoint=34.10.182.81 + cat + local response + local wait_count=30 + local retry=0 ++ curl -s -k -H 'Authorization: Bearer glsa_x8vxSfP02NB1WGOjPj27sBBnYcevj2hR_ed718512' -XPOST -d @payload.json https://34.10.182.81/v1/qan/metrics:getReport ++ jq '.rows[].fingerprint' + [[ -n "" ]] + rm -f payload.json + desc 'verify that the custom cluster name is configured' + set +o xtrace ----------------------------------------------------------------------------------- verify that the custom cluster name is configured ----------------------------------------------------------------------------------- + verify_custom_cluster_name super-custom glsa_x8vxSfP02NB1WGOjPj27sBBnYcevj2hR_ed718512 monitoring-pmm3-23926-monitoring-pmm3-mongos-0 monitoring-pmm3-23926-monitoring-pmm3-cfg-0 monitoring-pmm3-23926-monitoring-pmm3-rs0-0 + local expected_cluster=super-custom + local token=glsa_x8vxSfP02NB1WGOjPj27sBBnYcevj2hR_ed718512 + shift 2 + service_names=("$@") + local service_names + local endpoint ++ get_service_endpoint monitoring-service ++ local service=monitoring-service +++ jq '.status.loadBalancer.ingress[].hostname' +++ sed -e 's/^"//; s/"$//;' +++ 
kubectl_bin get service/monitoring-service -o json ++++ mktemp +++ local LAST_OUT=/tmp/tmp.pSmuUvjen2 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.CSqYpD2niJ +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.pSmuUvjen2 +++ cat /tmp/tmp.CSqYpD2niJ +++ rm /tmp/tmp.pSmuUvjen2 /tmp/tmp.CSqYpD2niJ +++ return 0 ++ local hostname=null ++ '[' -n null -a null '!=' null ']' +++ jq '.status.loadBalancer.ingress[].ip' +++ sed -e 's/^"//; s/"$//;' +++ kubectl_bin get service/monitoring-service -o json ++++ mktemp +++ local LAST_OUT=/tmp/tmp.mYRvb7Aa5G ++++ mktemp +++ local LAST_ERR=/tmp/tmp.17Hh44r4lY +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.mYRvb7Aa5G +++ cat /tmp/tmp.17Hh44r4lY +++ rm /tmp/tmp.mYRvb7Aa5G /tmp/tmp.17Hh44r4lY +++ return 0 ++ local ip=34.10.182.81 ++ '[' -n 34.10.182.81 -a 34.10.182.81 '!=' null ']' ++ echo 34.10.182.81 ++ return + endpoint=34.10.182.81 + local response ++ curl -s -k -H 'Authorization: Bearer glsa_x8vxSfP02NB1WGOjPj27sBBnYcevj2hR_ed718512' 'https://34.10.182.81/v1/inventory/services?service_type=SERVICE_TYPE_MONGODB_SERVICE' + response='{ "mysql": [], "mongodb": [ { "service_id": "1ca5a865-e003-407c-8aca-ae1a349cc7c2", "service_name": "monitoring-pmm3-23926-monitoring-pmm3-rs0-1", "node_id": "ef4dec8a-9414-4232-be85-de1dfe22cddf", "address": "localhost", "port": 27019, "socket": "", "environment": "dev-mongod", "cluster": "super-custom", "replication_set": "", "custom_labels": {}, "version": "" }, { "service_id": "5cd09e3e-e7ac-4106-8e9b-53f374be9e78", "service_name": "monitoring-pmm3-23926-monitoring-pmm3-mongos-1", "node_id": "3b5891b0-102c-4773-9309-5d94af173180", "address": "localhost", "port": 27019, "socket": "", "environment": "dev-mongos", "cluster": "super-custom", "replication_set": "", "custom_labels": {}, "version": "" }, { "service_id": "6dc392b1-73ba-4f55-a398-38a20a585bd2", "service_name": "monitoring-pmm3-23926-monitoring-pmm3-cfg-2", "node_id": "d99c18ab-f306-4268-96c6-36259e69b028", "address": "localhost", "port": 27019, "socket": "", "environment": "dev-mongod", "cluster": "super-custom", "replication_set": "", "custom_labels": {}, "version": "" }, { "service_id": "75ee88b9-92e9-4fa0-89dc-2bfff524eac7", "service_name": "monitoring-pmm3-23926-monitoring-pmm3-rs0-0", "node_id": "15d57ce0-20d2-404d-8721-64b178b00166", "address": "localhost", "port": 27019, "socket": "", "environment": "dev-mongod", "cluster": "super-custom", "replication_set": "", "custom_labels": {}, "version": "" }, { "service_id": "87fa3fc9-bc73-4255-86d1-4514ab854626", "service_name": "monitoring-pmm3-23926-monitoring-pmm3-cfg-0", "node_id": "4ec4d0ab-e96f-4e52-bc6d-fbc1870d398c", "address": "localhost", "port": 27019, "socket": "", "environment": "dev-mongod", "cluster": "super-custom", "replication_set": "", "custom_labels": {}, "version": "" }, { "service_id": "9aae3a3c-4340-4702-9082-7d6657440286", "service_name": "monitoring-pmm3-23926-monitoring-pmm3-mongos-2", "node_id": "34994576-0ccc-40a1-a96c-e60adc7cfbf8", "address": "localhost", "port": 27019, "socket": "", "environment": "dev-mongos", "cluster": "super-custom", "replication_set": "", "custom_labels": {}, "version": "" }, { "service_id": 
"eaca6c70-02ef-4310-aad5-02f2cbaf8620", "service_name": "monitoring-pmm3-23926-monitoring-pmm3-rs0-2", "node_id": "9b2902f2-f4ce-46dd-abbf-f9823c70ab74", "address": "localhost", "port": 27019, "socket": "", "environment": "dev-mongod", "cluster": "super-custom", "replication_set": "", "custom_labels": {}, "version": "" }, { "service_id": "ee185125-ae9b-482e-82b9-58eb3a94a4ec", "service_name": "monitoring-pmm3-23926-monitoring-pmm3-cfg-1", "node_id": "b0afb311-88be-46c3-aef1-aada1ec75263", "address": "localhost", "port": 27019, "socket": "", "environment": "dev-mongod", "cluster": "super-custom", "replication_set": "", "custom_labels": {}, "version": "" }, { "service_id": "f1c1f234-5ad5-4993-87f2-1395ac2b2e9f", "service_name": "monitoring-pmm3-23926-monitoring-pmm3-mongos-0", "node_id": "da25b0f4-0b59-47b2-b185-f122adfc0c63", "address": "localhost", "port": 27019, "socket": "", "environment": "dev-mongos", "cluster": "super-custom", "replication_set": "", "custom_labels": {}, "version": "" } ], "postgresql": [], "proxysql": [], "haproxy": [], "external": [] }' + local verified=0 + for service_name in '"${service_names[@]}"' + local actual_cluster ++ jq -r --arg name monitoring-pmm3-23926-monitoring-pmm3-mongos-0 ' .mongodb[] | select(.service_name == $name) | .cluster ' ++ echo '{ "mysql": [], "mongodb": [ { "service_id": "1ca5a865-e003-407c-8aca-ae1a349cc7c2", "service_name": "monitoring-pmm3-23926-monitoring-pmm3-rs0-1", "node_id": "ef4dec8a-9414-4232-be85-de1dfe22cddf", "address": "localhost", "port": 27019, "socket": "", "environment": "dev-mongod", "cluster": "super-custom", "replication_set": "", "custom_labels": {}, "version": "" }, { "service_id": "5cd09e3e-e7ac-4106-8e9b-53f374be9e78", "service_name": "monitoring-pmm3-23926-monitoring-pmm3-mongos-1", "node_id": "3b5891b0-102c-4773-9309-5d94af173180", "address": "localhost", "port": 27019, "socket": "", "environment": "dev-mongos", "cluster": "super-custom", "replication_set": "", "custom_labels": {}, "version": "" }, { "service_id": "6dc392b1-73ba-4f55-a398-38a20a585bd2", "service_name": "monitoring-pmm3-23926-monitoring-pmm3-cfg-2", "node_id": "d99c18ab-f306-4268-96c6-36259e69b028", "address": "localhost", "port": 27019, "socket": "", "environment": "dev-mongod", "cluster": "super-custom", "replication_set": "", "custom_labels": {}, "version": "" }, { "service_id": "75ee88b9-92e9-4fa0-89dc-2bfff524eac7", "service_name": "monitoring-pmm3-23926-monitoring-pmm3-rs0-0", "node_id": "15d57ce0-20d2-404d-8721-64b178b00166", "address": "localhost", "port": 27019, "socket": "", "environment": "dev-mongod", "cluster": "super-custom", "replication_set": "", "custom_labels": {}, "version": "" }, { "service_id": "87fa3fc9-bc73-4255-86d1-4514ab854626", "service_name": "monitoring-pmm3-23926-monitoring-pmm3-cfg-0", "node_id": "4ec4d0ab-e96f-4e52-bc6d-fbc1870d398c", "address": "localhost", "port": 27019, "socket": "", "environment": "dev-mongod", "cluster": "super-custom", "replication_set": "", "custom_labels": {}, "version": "" }, { "service_id": "9aae3a3c-4340-4702-9082-7d6657440286", "service_name": "monitoring-pmm3-23926-monitoring-pmm3-mongos-2", "node_id": "34994576-0ccc-40a1-a96c-e60adc7cfbf8", "address": "localhost", "port": 27019, "socket": "", "environment": "dev-mongos", "cluster": "super-custom", "replication_set": "", "custom_labels": {}, "version": "" }, { "service_id": "eaca6c70-02ef-4310-aad5-02f2cbaf8620", "service_name": "monitoring-pmm3-23926-monitoring-pmm3-rs0-2", "node_id": "9b2902f2-f4ce-46dd-abbf-f9823c70ab74", "address": 
"localhost", "port": 27019, "socket": "", "environment": "dev-mongod", "cluster": "super-custom", "replication_set": "", "custom_labels": {}, "version": "" }, { "service_id": "ee185125-ae9b-482e-82b9-58eb3a94a4ec", "service_name": "monitoring-pmm3-23926-monitoring-pmm3-cfg-1", "node_id": "b0afb311-88be-46c3-aef1-aada1ec75263", "address": "localhost", "port": 27019, "socket": "", "environment": "dev-mongod", "cluster": "super-custom", "replication_set": "", "custom_labels": {}, "version": "" }, { "service_id": "f1c1f234-5ad5-4993-87f2-1395ac2b2e9f", "service_name": "monitoring-pmm3-23926-monitoring-pmm3-mongos-0", "node_id": "da25b0f4-0b59-47b2-b185-f122adfc0c63", "address": "localhost", "port": 27019, "socket": "", "environment": "dev-mongos", "cluster": "super-custom", "replication_set": "", "custom_labels": {}, "version": "" } ], "postgresql": [], "proxysql": [], "haproxy": [], "external": [] }' + actual_cluster=super-custom + [[ -z super-custom ]] + [[ super-custom == \n\u\l\l ]] + [[ super-custom != \s\u\p\e\r\-\c\u\s\t\o\m ]] + for service_name in '"${service_names[@]}"' + local actual_cluster ++ echo '{ "mysql": [], "mongodb": [ { "service_id": "1ca5a865-e003-407c-8aca-ae1a349cc7c2", "service_name": "monitoring-pmm3-23926-monitoring-pmm3-rs0-1", "node_id": "ef4dec8a-9414-4232-be85-de1dfe22cddf", "address": "localhost", "port": 27019, "socket": "", "environment": "dev-mongod", "cluster": "super-custom", "replication_set": "", "custom_labels": {}, "version": "" }, { "service_id": "5cd09e3e-e7ac-4106-8e9b-53f374be9e78", "service_name": "monitoring-pmm3-23926-monitoring-pmm3-mongos-1", "node_id": "3b5891b0-102c-4773-9309-5d94af173180", "address": "localhost", "port": 27019, "socket": "", "environment": "dev-mongos", "cluster": "super-custom", "replication_set": "", "custom_labels": {}, "version": "" }, { "service_id": "6dc392b1-73ba-4f55-a398-38a20a585bd2", "service_name": "monitoring-pmm3-23926-monitoring-pmm3-cfg-2", "node_id": "d99c18ab-f306-4268-96c6-36259e69b028", "address": "localhost", "port": 27019, "socket": "", "environment": "dev-mongod", "cluster": "super-custom", "replication_set": "", "custom_labels": {}, "version": "" }, { "service_id": "75ee88b9-92e9-4fa0-89dc-2bfff524eac7", "service_name": "monitoring-pmm3-23926-monitoring-pmm3-rs0-0", "node_id": "15d57ce0-20d2-404d-8721-64b178b00166", "address": "localhost", "port": 27019, "socket": "", "environment": "dev-mongod", "cluster": "super-custom", "replication_set": "", "custom_labels": {}, "version": "" }, { "service_id": "87fa3fc9-bc73-4255-86d1-4514ab854626", "service_name": "monitoring-pmm3-23926-monitoring-pmm3-cfg-0", "node_id": "4ec4d0ab-e96f-4e52-bc6d-fbc1870d398c", "address": "localhost", "port": 27019, "socket": "", "environment": "dev-mongod", "cluster": "super-custom", "replication_set": "", "custom_labels": {}, "version": "" }, { "service_id": "9aae3a3c-4340-4702-9082-7d6657440286", "service_name": "monitoring-pmm3-23926-monitoring-pmm3-mongos-2", "node_id": "34994576-0ccc-40a1-a96c-e60adc7cfbf8", "address": "localhost", "port": 27019, "socket": "", "environment": "dev-mongos", "cluster": "super-custom", "replication_set": "", "custom_labels": {}, "version": "" }, { "service_id": "eaca6c70-02ef-4310-aad5-02f2cbaf8620", "service_name": "monitoring-pmm3-23926-monitoring-pmm3-rs0-2", "node_id": "9b2902f2-f4ce-46dd-abbf-f9823c70ab74", "address": "localhost", "port": 27019, "socket": "", "environment": "dev-mongod", "cluster": "super-custom", "replication_set": "", "custom_labels": {}, "version": "" }, { "service_id": 
"ee185125-ae9b-482e-82b9-58eb3a94a4ec", "service_name": "monitoring-pmm3-23926-monitoring-pmm3-cfg-1", "node_id": "b0afb311-88be-46c3-aef1-aada1ec75263", "address": "localhost", "port": 27019, "socket": "", "environment": "dev-mongod", "cluster": "super-custom", "replication_set": "", "custom_labels": {}, "version": "" }, { "service_id": "f1c1f234-5ad5-4993-87f2-1395ac2b2e9f", "service_name": "monitoring-pmm3-23926-monitoring-pmm3-mongos-0", "node_id": "da25b0f4-0b59-47b2-b185-f122adfc0c63", "address": "localhost", "port": 27019, "socket": "", "environment": "dev-mongos", "cluster": "super-custom", "replication_set": "", "custom_labels": {}, "version": "" } ], "postgresql": [], "proxysql": [], "haproxy": [], "external": [] }' ++ jq -r --arg name monitoring-pmm3-23926-monitoring-pmm3-cfg-0 ' .mongodb[] | select(.service_name == $name) | .cluster ' + actual_cluster=super-custom + [[ -z super-custom ]] + [[ super-custom == \n\u\l\l ]] + [[ super-custom != \s\u\p\e\r\-\c\u\s\t\o\m ]] + for service_name in '"${service_names[@]}"' + local actual_cluster ++ echo '{ "mysql": [], "mongodb": [ { "service_id": "1ca5a865-e003-407c-8aca-ae1a349cc7c2", "service_name": "monitoring-pmm3-23926-monitoring-pmm3-rs0-1", "node_id": "ef4dec8a-9414-4232-be85-de1dfe22cddf", "address": "localhost", "port": 27019, "socket": "", "environment": "dev-mongod", "cluster": "super-custom", "replication_set": "", "custom_labels": {}, "version": "" }, { "service_id": "5cd09e3e-e7ac-4106-8e9b-53f374be9e78", "service_name": "monitoring-pmm3-23926-monitoring-pmm3-mongos-1", "node_id": "3b5891b0-102c-4773-9309-5d94af173180", "address": "localhost", "port": 27019, "socket": "", "environment": "dev-mongos", "cluster": "super-custom", "replication_set": "", "custom_labels": {}, "version": "" }, { "service_id": "6dc392b1-73ba-4f55-a398-38a20a585bd2", "service_name": "monitoring-pmm3-23926-monitoring-pmm3-cfg-2", "node_id": "d99c18ab-f306-4268-96c6-36259e69b028", "address": "localhost", "port": 27019, "socket": "", "environment": "dev-mongod", "cluster": "super-custom", "replication_set": "", "custom_labels": {}, "version": "" }, { "service_id": "75ee88b9-92e9-4fa0-89dc-2bfff524eac7", "service_name": "monitoring-pmm3-23926-monitoring-pmm3-rs0-0", "node_id": "15d57ce0-20d2-404d-8721-64b178b00166", "address": "localhost", "port": 27019, "socket": "", "environment": "dev-mongod", "cluster": "super-custom", "replication_set": "", "custom_labels": {}, "version": "" }, { "service_id": "87fa3fc9-bc73-4255-86d1-4514ab854626", "service_name": "monitoring-pmm3-23926-monitoring-pmm3-cfg-0", "node_id": "4ec4d0ab-e96f-4e52-bc6d-fbc1870d398c", "address": "localhost", "port": 27019, "socket": "", "environment": "dev-mongod", "cluster": "super-custom", "replication_set": "", "custom_labels": {}, "version": "" }, { "service_id": "9aae3a3c-4340-4702-9082-7d6657440286", "service_name": "monitoring-pmm3-23926-monitoring-pmm3-mongos-2", "node_id": "34994576-0ccc-40a1-a96c-e60adc7cfbf8", "address": "localhost", "port": 27019, "socket": "", "environment": "dev-mongos", "cluster": "super-custom", "replication_set": "", "custom_labels": {}, "version": "" }, { "service_id": "eaca6c70-02ef-4310-aad5-02f2cbaf8620", "service_name": "monitoring-pmm3-23926-monitoring-pmm3-rs0-2", "node_id": "9b2902f2-f4ce-46dd-abbf-f9823c70ab74", "address": "localhost", "port": 27019, "socket": "", "environment": "dev-mongod", "cluster": "super-custom", "replication_set": "", "custom_labels": {}, "version": "" }, { "service_id": "ee185125-ae9b-482e-82b9-58eb3a94a4ec", 
"service_name": "monitoring-pmm3-23926-monitoring-pmm3-cfg-1", "node_id": "b0afb311-88be-46c3-aef1-aada1ec75263", "address": "localhost", "port": 27019, "socket": "", "environment": "dev-mongod", "cluster": "super-custom", "replication_set": "", "custom_labels": {}, "version": "" }, { "service_id": "f1c1f234-5ad5-4993-87f2-1395ac2b2e9f", "service_name": "monitoring-pmm3-23926-monitoring-pmm3-mongos-0", "node_id": "da25b0f4-0b59-47b2-b185-f122adfc0c63", "address": "localhost", "port": 27019, "socket": "", "environment": "dev-mongos", "cluster": "super-custom", "replication_set": "", "custom_labels": {}, "version": "" } ], "postgresql": [], "proxysql": [], "haproxy": [], "external": [] }' ++ jq -r --arg name monitoring-pmm3-23926-monitoring-pmm3-rs0-0 ' .mongodb[] | select(.service_name == $name) | .cluster ' + actual_cluster=super-custom + [[ -z super-custom ]] + [[ super-custom == \n\u\l\l ]] + [[ super-custom != \s\u\p\e\r\-\c\u\s\t\o\m ]] + return 0 + nodeList=($(get_node_id_from_pmm)) ++ get_node_id_from_pmm ++ nodeList=() ++ local -a nodeList +++ kubectl_bin get pods --no-headers -l app.kubernetes.io/name=percona-server-mongodb --output=custom-columns=NAME:.metadata.name ++++ mktemp +++ local LAST_OUT=/tmp/tmp.IbLJtvNszX ++++ mktemp +++ local LAST_ERR=/tmp/tmp.01BJ4HBzW7 +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get pods --no-headers -l app.kubernetes.io/name=percona-server-mongodb --output=custom-columns=NAME:.metadata.name +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.IbLJtvNszX +++ cat /tmp/tmp.01BJ4HBzW7 +++ rm /tmp/tmp.IbLJtvNszX /tmp/tmp.01BJ4HBzW7 +++ return 0 ++ for instance in '$(kubectl_bin get pods --no-headers -l app.kubernetes.io/name=percona-server-mongodb --output=custom-columns='\''NAME:.metadata.name'\'')' ++ nodeList+=($(kubectl_bin exec -n "$namespace" $instance -c pmm-client -- pmm-admin status --json | jq -r '.pmm_agent_status.node_id')) +++ jq -r .pmm_agent_status.node_id +++ kubectl_bin exec -n monitoring-pmm3-23926 monitoring-pmm3-cfg-0 -c pmm-client -- pmm-admin status --json ++++ mktemp +++ local LAST_OUT=/tmp/tmp.SAm8dXZ2lU ++++ mktemp +++ local LAST_ERR=/tmp/tmp.7pK1mjWKju +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-pmm3-23926 monitoring-pmm3-cfg-0 -c pmm-client -- pmm-admin status --json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.SAm8dXZ2lU +++ cat /tmp/tmp.7pK1mjWKju +++ rm /tmp/tmp.SAm8dXZ2lU /tmp/tmp.7pK1mjWKju +++ return 0 ++ for instance in '$(kubectl_bin get pods --no-headers -l app.kubernetes.io/name=percona-server-mongodb --output=custom-columns='\''NAME:.metadata.name'\'')' ++ nodeList+=($(kubectl_bin exec -n "$namespace" $instance -c pmm-client -- pmm-admin status --json | jq -r '.pmm_agent_status.node_id')) +++ kubectl_bin exec -n monitoring-pmm3-23926 monitoring-pmm3-cfg-1 -c pmm-client -- pmm-admin status --json +++ jq -r .pmm_agent_status.node_id ++++ mktemp +++ local LAST_OUT=/tmp/tmp.oVu4L6ov6Q ++++ mktemp +++ local LAST_ERR=/tmp/tmp.qfOkg7apnh +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-pmm3-23926 monitoring-pmm3-cfg-1 -c pmm-client -- pmm-admin status --json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.oVu4L6ov6Q +++ cat /tmp/tmp.qfOkg7apnh +++ rm /tmp/tmp.oVu4L6ov6Q /tmp/tmp.qfOkg7apnh +++ return 0 
++ for instance in '$(kubectl_bin get pods --no-headers -l app.kubernetes.io/name=percona-server-mongodb --output=custom-columns='\''NAME:.metadata.name'\'')' ++ nodeList+=($(kubectl_bin exec -n "$namespace" $instance -c pmm-client -- pmm-admin status --json | jq -r '.pmm_agent_status.node_id')) +++ jq -r .pmm_agent_status.node_id +++ kubectl_bin exec -n monitoring-pmm3-23926 monitoring-pmm3-cfg-2 -c pmm-client -- pmm-admin status --json ++++ mktemp +++ local LAST_OUT=/tmp/tmp.rGYkWeq3f3 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.uYnL8Qdl6o +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-pmm3-23926 monitoring-pmm3-cfg-2 -c pmm-client -- pmm-admin status --json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.rGYkWeq3f3 +++ cat /tmp/tmp.uYnL8Qdl6o +++ rm /tmp/tmp.rGYkWeq3f3 /tmp/tmp.uYnL8Qdl6o +++ return 0 ++ for instance in '$(kubectl_bin get pods --no-headers -l app.kubernetes.io/name=percona-server-mongodb --output=custom-columns='\''NAME:.metadata.name'\'')' ++ nodeList+=($(kubectl_bin exec -n "$namespace" $instance -c pmm-client -- pmm-admin status --json | jq -r '.pmm_agent_status.node_id')) +++ jq -r .pmm_agent_status.node_id +++ kubectl_bin exec -n monitoring-pmm3-23926 monitoring-pmm3-mongos-0 -c pmm-client -- pmm-admin status --json ++++ mktemp +++ local LAST_OUT=/tmp/tmp.RIVjNlNeJo ++++ mktemp +++ local LAST_ERR=/tmp/tmp.emWjhZhRWO +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-pmm3-23926 monitoring-pmm3-mongos-0 -c pmm-client -- pmm-admin status --json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.RIVjNlNeJo +++ cat /tmp/tmp.emWjhZhRWO +++ rm /tmp/tmp.RIVjNlNeJo /tmp/tmp.emWjhZhRWO +++ return 0 ++ for instance in '$(kubectl_bin get pods --no-headers -l app.kubernetes.io/name=percona-server-mongodb --output=custom-columns='\''NAME:.metadata.name'\'')' ++ nodeList+=($(kubectl_bin exec -n "$namespace" $instance -c pmm-client -- pmm-admin status --json | jq -r '.pmm_agent_status.node_id')) +++ kubectl_bin exec -n monitoring-pmm3-23926 monitoring-pmm3-mongos-1 -c pmm-client -- pmm-admin status --json ++++ mktemp +++ jq -r .pmm_agent_status.node_id +++ local LAST_OUT=/tmp/tmp.S1ZNJotM8B ++++ mktemp +++ local LAST_ERR=/tmp/tmp.u81moaGBPx +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-pmm3-23926 monitoring-pmm3-mongos-1 -c pmm-client -- pmm-admin status --json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.S1ZNJotM8B +++ cat /tmp/tmp.u81moaGBPx +++ rm /tmp/tmp.S1ZNJotM8B /tmp/tmp.u81moaGBPx +++ return 0 ++ for instance in '$(kubectl_bin get pods --no-headers -l app.kubernetes.io/name=percona-server-mongodb --output=custom-columns='\''NAME:.metadata.name'\'')' ++ nodeList+=($(kubectl_bin exec -n "$namespace" $instance -c pmm-client -- pmm-admin status --json | jq -r '.pmm_agent_status.node_id')) +++ jq -r .pmm_agent_status.node_id +++ kubectl_bin exec -n monitoring-pmm3-23926 monitoring-pmm3-mongos-2 -c pmm-client -- pmm-admin status --json ++++ mktemp +++ local LAST_OUT=/tmp/tmp.I1gXYqPSEW ++++ mktemp +++ local LAST_ERR=/tmp/tmp.TTxKAYcakX +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-pmm3-23926 monitoring-pmm3-mongos-2 -c pmm-client -- pmm-admin status --json +++ 
exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.I1gXYqPSEW +++ cat /tmp/tmp.TTxKAYcakX +++ rm /tmp/tmp.I1gXYqPSEW /tmp/tmp.TTxKAYcakX +++ return 0 ++ for instance in '$(kubectl_bin get pods --no-headers -l app.kubernetes.io/name=percona-server-mongodb --output=custom-columns='\''NAME:.metadata.name'\'')' ++ nodeList+=($(kubectl_bin exec -n "$namespace" $instance -c pmm-client -- pmm-admin status --json | jq -r '.pmm_agent_status.node_id')) +++ kubectl_bin exec -n monitoring-pmm3-23926 monitoring-pmm3-rs0-0 -c pmm-client -- pmm-admin status --json +++ jq -r .pmm_agent_status.node_id ++++ mktemp +++ local LAST_OUT=/tmp/tmp.38XXqFdktZ ++++ mktemp +++ local LAST_ERR=/tmp/tmp.HAP3nDsjIc +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-pmm3-23926 monitoring-pmm3-rs0-0 -c pmm-client -- pmm-admin status --json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.38XXqFdktZ +++ cat /tmp/tmp.HAP3nDsjIc +++ rm /tmp/tmp.38XXqFdktZ /tmp/tmp.HAP3nDsjIc +++ return 0 ++ for instance in '$(kubectl_bin get pods --no-headers -l app.kubernetes.io/name=percona-server-mongodb --output=custom-columns='\''NAME:.metadata.name'\'')' ++ nodeList+=($(kubectl_bin exec -n "$namespace" $instance -c pmm-client -- pmm-admin status --json | jq -r '.pmm_agent_status.node_id')) +++ kubectl_bin exec -n monitoring-pmm3-23926 monitoring-pmm3-rs0-1 -c pmm-client -- pmm-admin status --json ++++ mktemp +++ local LAST_OUT=/tmp/tmp.8VivDVgdHv +++ jq -r .pmm_agent_status.node_id ++++ mktemp +++ local LAST_ERR=/tmp/tmp.1MVOIYtZrI +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-pmm3-23926 monitoring-pmm3-rs0-1 -c pmm-client -- pmm-admin status --json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.8VivDVgdHv +++ cat /tmp/tmp.1MVOIYtZrI +++ rm /tmp/tmp.8VivDVgdHv /tmp/tmp.1MVOIYtZrI +++ return 0 ++ for instance in '$(kubectl_bin get pods --no-headers -l app.kubernetes.io/name=percona-server-mongodb --output=custom-columns='\''NAME:.metadata.name'\'')' ++ nodeList+=($(kubectl_bin exec -n "$namespace" $instance -c pmm-client -- pmm-admin status --json | jq -r '.pmm_agent_status.node_id')) +++ kubectl_bin exec -n monitoring-pmm3-23926 monitoring-pmm3-rs0-2 -c pmm-client -- pmm-admin status --json +++ jq -r .pmm_agent_status.node_id ++++ mktemp +++ local LAST_OUT=/tmp/tmp.hR2tnwzS4r ++++ mktemp +++ local LAST_ERR=/tmp/tmp.if5m4s2whS +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-pmm3-23926 monitoring-pmm3-rs0-2 -c pmm-client -- pmm-admin status --json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.hR2tnwzS4r +++ cat /tmp/tmp.if5m4s2whS +++ rm /tmp/tmp.hR2tnwzS4r /tmp/tmp.if5m4s2whS +++ return 0 ++ echo 4ec4d0ab-e96f-4e52-bc6d-fbc1870d398c b0afb311-88be-46c3-aef1-aada1ec75263 d99c18ab-f306-4268-96c6-36259e69b028 da25b0f4-0b59-47b2-b185-f122adfc0c63 3b5891b0-102c-4773-9309-5d94af173180 34994576-0ccc-40a1-a96c-e60adc7cfbf8 15d57ce0-20d2-404d-8721-64b178b00166 ef4dec8a-9414-4232-be85-de1dfe22cddf 9b2902f2-f4ce-46dd-abbf-f9823c70ab74 + nodeList_from_pmm=($(does_node_id_exists "${nodeList[@]}")) ++ does_node_id_exists 4ec4d0ab-e96f-4e52-bc6d-fbc1870d398c b0afb311-88be-46c3-aef1-aada1ec75263 d99c18ab-f306-4268-96c6-36259e69b028 da25b0f4-0b59-47b2-b185-f122adfc0c63 
3b5891b0-102c-4773-9309-5d94af173180 34994576-0ccc-40a1-a96c-e60adc7cfbf8 15d57ce0-20d2-404d-8721-64b178b00166 ef4dec8a-9414-4232-be85-de1dfe22cddf 9b2902f2-f4ce-46dd-abbf-f9823c70ab74 ++ nodeList=("$@") ++ local -a nodeList ++ nodeList_from_pmm=() ++ local -a nodeList_from_pmm ++ for node_id in '"${nodeList[@]}"' ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-server-0 -- pmm-admin --server-url=https://admin:admin@$(get_pmm_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ grep 4ec4d0ab-e96f-4e52-bc6d-fbc1870d398c +++ awk '{print $4}' ++++ get_pmm_service_ip monitoring-service ++++ local service=monitoring-service ++++ grep -q NotFound ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.spec.type}' ++++ egrep -q 'hostname|ip' ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[]}' ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.Q61UblNEk5 +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.6toOTPGpCM ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.Q61UblNEk5 ++++ cat /tmp/tmp.6toOTPGpCM ++++ rm /tmp/tmp.Q61UblNEk5 /tmp/tmp.6toOTPGpCM ++++ return 0 ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.IdbMOuPJDM +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.lC1Ngcf0Xj ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.IdbMOuPJDM ++++ cat /tmp/tmp.lC1Ngcf0Xj ++++ rm /tmp/tmp.IdbMOuPJDM /tmp/tmp.lC1Ngcf0Xj ++++ return 0 +++ kubectl_bin exec -n monitoring-pmm3-23926 monitoring-server-0 -- pmm-admin --server-url=https://admin:admin@34.10.182.81/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.FddPvxhpLl ++++ mktemp +++ local LAST_ERR=/tmp/tmp.YiElqbLC7r +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-pmm3-23926 monitoring-server-0 -- pmm-admin --server-url=https://admin:admin@34.10.182.81/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.FddPvxhpLl +++ cat /tmp/tmp.YiElqbLC7r command terminated with exit code 1 +++ sleep 0 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-pmm3-23926 monitoring-server-0 -- pmm-admin --server-url=https://admin:admin@34.10.182.81/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.FddPvxhpLl +++ cat /tmp/tmp.YiElqbLC7r command terminated with exit code 1 +++ sleep 4 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-pmm3-23926 monitoring-server-0 -- pmm-admin --server-url=https://admin:admin@34.10.182.81/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 
'!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.FddPvxhpLl +++ cat /tmp/tmp.YiElqbLC7r command terminated with exit code 1 +++ sleep 8 +++ cat /tmp/tmp.FddPvxhpLl +++ cat /tmp/tmp.YiElqbLC7r command terminated with exit code 1 +++ rm /tmp/tmp.FddPvxhpLl /tmp/tmp.YiElqbLC7r +++ return 1 ++ for node_id in '"${nodeList[@]}"' ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-server-0 -- pmm-admin --server-url=https://admin:admin@$(get_pmm_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ grep b0afb311-88be-46c3-aef1-aada1ec75263 +++ awk '{print $4}' ++++ get_pmm_service_ip monitoring-service ++++ local service=monitoring-service ++++ grep -q NotFound ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.spec.type}' ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[]}' ++++ egrep -q 'hostname|ip' ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.cSLm7qRntU +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.IiaJuGQ67a ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.cSLm7qRntU ++++ cat /tmp/tmp.IiaJuGQ67a ++++ rm /tmp/tmp.cSLm7qRntU /tmp/tmp.IiaJuGQ67a ++++ return 0 ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.sASRlLwlDU +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.FkcsHHvr56 ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.sASRlLwlDU ++++ cat /tmp/tmp.FkcsHHvr56 ++++ rm /tmp/tmp.sASRlLwlDU /tmp/tmp.FkcsHHvr56 ++++ return 0 +++ kubectl_bin exec -n monitoring-pmm3-23926 monitoring-server-0 -- pmm-admin --server-url=https://admin:admin@34.10.182.81/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.y9AANrRAdw ++++ mktemp +++ local LAST_ERR=/tmp/tmp.TrqJXRlthq +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-pmm3-23926 monitoring-server-0 -- pmm-admin --server-url=https://admin:admin@34.10.182.81/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.y9AANrRAdw +++ cat /tmp/tmp.TrqJXRlthq command terminated with exit code 1 +++ sleep 0 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-pmm3-23926 monitoring-server-0 -- pmm-admin --server-url=https://admin:admin@34.10.182.81/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.y9AANrRAdw +++ cat /tmp/tmp.TrqJXRlthq command terminated with exit code 1 +++ sleep 4 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-pmm3-23926 monitoring-server-0 -- pmm-admin --server-url=https://admin:admin@34.10.182.81/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 
0 -a -n 1 ']' +++ cat /tmp/tmp.y9AANrRAdw +++ cat /tmp/tmp.TrqJXRlthq command terminated with exit code 1 +++ sleep 8 +++ cat /tmp/tmp.y9AANrRAdw +++ cat /tmp/tmp.TrqJXRlthq command terminated with exit code 1 +++ rm /tmp/tmp.y9AANrRAdw /tmp/tmp.TrqJXRlthq +++ return 1 ++ for node_id in '"${nodeList[@]}"' ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-server-0 -- pmm-admin --server-url=https://admin:admin@$(get_pmm_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ awk '{print $4}' +++ grep d99c18ab-f306-4268-96c6-36259e69b028 ++++ get_pmm_service_ip monitoring-service ++++ local service=monitoring-service ++++ grep -q NotFound ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.spec.type}' ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[]}' ++++ egrep -q 'hostname|ip' ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.M2L8B7BbSM +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.fzzFPb9qkB ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.M2L8B7BbSM ++++ cat /tmp/tmp.fzzFPb9qkB ++++ rm /tmp/tmp.M2L8B7BbSM /tmp/tmp.fzzFPb9qkB ++++ return 0 ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.jpqlLZz5ik +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.HAWIJRdasn ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.jpqlLZz5ik ++++ cat /tmp/tmp.HAWIJRdasn ++++ rm /tmp/tmp.jpqlLZz5ik /tmp/tmp.HAWIJRdasn ++++ return 0 +++ kubectl_bin exec -n monitoring-pmm3-23926 monitoring-server-0 -- pmm-admin --server-url=https://admin:admin@34.10.182.81/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.d9E40MFDnf ++++ mktemp +++ local LAST_ERR=/tmp/tmp.4mK0nQoeZW +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-pmm3-23926 monitoring-server-0 -- pmm-admin --server-url=https://admin:admin@34.10.182.81/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.d9E40MFDnf +++ cat /tmp/tmp.4mK0nQoeZW command terminated with exit code 1 +++ sleep 0 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-pmm3-23926 monitoring-server-0 -- pmm-admin --server-url=https://admin:admin@34.10.182.81/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.d9E40MFDnf +++ cat /tmp/tmp.4mK0nQoeZW command terminated with exit code 1 +++ sleep 4 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-pmm3-23926 monitoring-server-0 -- pmm-admin --server-url=https://admin:admin@34.10.182.81/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a 
-n 1 ']' +++ cat /tmp/tmp.d9E40MFDnf +++ cat /tmp/tmp.4mK0nQoeZW command terminated with exit code 1 +++ sleep 8 +++ cat /tmp/tmp.d9E40MFDnf +++ cat /tmp/tmp.4mK0nQoeZW command terminated with exit code 1 +++ rm /tmp/tmp.d9E40MFDnf /tmp/tmp.4mK0nQoeZW +++ return 1 ++ for node_id in '"${nodeList[@]}"' ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-server-0 -- pmm-admin --server-url=https://admin:admin@$(get_pmm_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ awk '{print $4}' +++ grep da25b0f4-0b59-47b2-b185-f122adfc0c63 ++++ get_pmm_service_ip monitoring-service ++++ local service=monitoring-service ++++ grep -q NotFound ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.spec.type}' ++++ egrep -q 'hostname|ip' ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[]}' ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.DdDfEUemKC +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.8U8IrsHiXh ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.DdDfEUemKC ++++ cat /tmp/tmp.8U8IrsHiXh ++++ rm /tmp/tmp.DdDfEUemKC /tmp/tmp.8U8IrsHiXh ++++ return 0 ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.q1In3fXaQc +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.Zxg5Gctp5s ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.q1In3fXaQc ++++ cat /tmp/tmp.Zxg5Gctp5s ++++ rm /tmp/tmp.q1In3fXaQc /tmp/tmp.Zxg5Gctp5s ++++ return 0 +++ kubectl_bin exec -n monitoring-pmm3-23926 monitoring-server-0 -- pmm-admin --server-url=https://admin:admin@34.10.182.81/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.3KktXsaMcR ++++ mktemp +++ local LAST_ERR=/tmp/tmp.jPfXqZxIoc +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-pmm3-23926 monitoring-server-0 -- pmm-admin --server-url=https://admin:admin@34.10.182.81/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.3KktXsaMcR +++ cat /tmp/tmp.jPfXqZxIoc command terminated with exit code 1 +++ sleep 0 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-pmm3-23926 monitoring-server-0 -- pmm-admin --server-url=https://admin:admin@34.10.182.81/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.3KktXsaMcR +++ cat /tmp/tmp.jPfXqZxIoc command terminated with exit code 1 +++ sleep 4 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-pmm3-23926 monitoring-server-0 -- pmm-admin --server-url=https://admin:admin@34.10.182.81/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 
']' +++ cat /tmp/tmp.3KktXsaMcR +++ cat /tmp/tmp.jPfXqZxIoc command terminated with exit code 1 +++ sleep 8 +++ cat /tmp/tmp.3KktXsaMcR +++ cat /tmp/tmp.jPfXqZxIoc command terminated with exit code 1 +++ rm /tmp/tmp.3KktXsaMcR /tmp/tmp.jPfXqZxIoc +++ return 1 ++ for node_id in '"${nodeList[@]}"' ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-server-0 -- pmm-admin --server-url=https://admin:admin@$(get_pmm_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ grep 3b5891b0-102c-4773-9309-5d94af173180 +++ awk '{print $4}' ++++ get_pmm_service_ip monitoring-service ++++ local service=monitoring-service ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.spec.type}' ++++ grep -q NotFound ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[]}' ++++ egrep -q 'hostname|ip' ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.mnA5LT4dVN +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.sAqq1MpBuL ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.mnA5LT4dVN ++++ cat /tmp/tmp.sAqq1MpBuL ++++ rm /tmp/tmp.mnA5LT4dVN /tmp/tmp.sAqq1MpBuL ++++ return 0 ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.0GPMJpAT2k +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.Fh35NjW9d8 ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.0GPMJpAT2k ++++ cat /tmp/tmp.Fh35NjW9d8 ++++ rm /tmp/tmp.0GPMJpAT2k /tmp/tmp.Fh35NjW9d8 ++++ return 0 +++ kubectl_bin exec -n monitoring-pmm3-23926 monitoring-server-0 -- pmm-admin --server-url=https://admin:admin@34.10.182.81/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.97GdDJsAHW ++++ mktemp +++ local LAST_ERR=/tmp/tmp.tGRATUY8vJ +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-pmm3-23926 monitoring-server-0 -- pmm-admin --server-url=https://admin:admin@34.10.182.81/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.97GdDJsAHW +++ cat /tmp/tmp.tGRATUY8vJ command terminated with exit code 1 +++ sleep 0 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-pmm3-23926 monitoring-server-0 -- pmm-admin --server-url=https://admin:admin@34.10.182.81/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.97GdDJsAHW +++ cat /tmp/tmp.tGRATUY8vJ command terminated with exit code 1 +++ sleep 4 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-pmm3-23926 monitoring-server-0 -- pmm-admin --server-url=https://admin:admin@34.10.182.81/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' 
+++ cat /tmp/tmp.97GdDJsAHW +++ cat /tmp/tmp.tGRATUY8vJ command terminated with exit code 1 +++ sleep 8 +++ cat /tmp/tmp.97GdDJsAHW +++ cat /tmp/tmp.tGRATUY8vJ command terminated with exit code 1 +++ rm /tmp/tmp.97GdDJsAHW /tmp/tmp.tGRATUY8vJ +++ return 1 ++ for node_id in '"${nodeList[@]}"' ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-server-0 -- pmm-admin --server-url=https://admin:admin@$(get_pmm_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ grep 34994576-0ccc-40a1-a96c-e60adc7cfbf8 +++ awk '{print $4}' ++++ get_pmm_service_ip monitoring-service ++++ local service=monitoring-service ++++ grep -q NotFound ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.spec.type}' ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[]}' ++++ egrep -q 'hostname|ip' ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.mhAhG1r7ft +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.kIWsLbu6f2 ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.mhAhG1r7ft ++++ cat /tmp/tmp.kIWsLbu6f2 ++++ rm /tmp/tmp.mhAhG1r7ft /tmp/tmp.kIWsLbu6f2 ++++ return 0 ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.TGJSQt6Yht +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.kzQttMvtg5 ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.TGJSQt6Yht ++++ cat /tmp/tmp.kzQttMvtg5 ++++ rm /tmp/tmp.TGJSQt6Yht /tmp/tmp.kzQttMvtg5 ++++ return 0 +++ kubectl_bin exec -n monitoring-pmm3-23926 monitoring-server-0 -- pmm-admin --server-url=https://admin:admin@34.10.182.81/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.Ly89OBMLNE ++++ mktemp +++ local LAST_ERR=/tmp/tmp.hh34A8fWMg +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-pmm3-23926 monitoring-server-0 -- pmm-admin --server-url=https://admin:admin@34.10.182.81/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.Ly89OBMLNE +++ cat /tmp/tmp.hh34A8fWMg command terminated with exit code 1 +++ sleep 0 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-pmm3-23926 monitoring-server-0 -- pmm-admin --server-url=https://admin:admin@34.10.182.81/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.Ly89OBMLNE +++ cat /tmp/tmp.hh34A8fWMg command terminated with exit code 1 +++ sleep 4 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-pmm3-23926 monitoring-server-0 -- pmm-admin --server-url=https://admin:admin@34.10.182.81/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ 
cat /tmp/tmp.Ly89OBMLNE +++ cat /tmp/tmp.hh34A8fWMg command terminated with exit code 1 +++ sleep 8 +++ cat /tmp/tmp.Ly89OBMLNE +++ cat /tmp/tmp.hh34A8fWMg command terminated with exit code 1 +++ rm /tmp/tmp.Ly89OBMLNE /tmp/tmp.hh34A8fWMg +++ return 1 ++ for node_id in '"${nodeList[@]}"' ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-server-0 -- pmm-admin --server-url=https://admin:admin@$(get_pmm_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ grep 15d57ce0-20d2-404d-8721-64b178b00166 +++ awk '{print $4}' ++++ get_pmm_service_ip monitoring-service ++++ local service=monitoring-service ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.spec.type}' ++++ grep -q NotFound ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[]}' ++++ egrep -q 'hostname|ip' ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.CURz3Hc861 +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.VlSmmKTSKE ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.CURz3Hc861 ++++ cat /tmp/tmp.VlSmmKTSKE ++++ rm /tmp/tmp.CURz3Hc861 /tmp/tmp.VlSmmKTSKE ++++ return 0 ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.d1pg0HW3fN +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.qLBWj3jvz9 ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.d1pg0HW3fN ++++ cat /tmp/tmp.qLBWj3jvz9 ++++ rm /tmp/tmp.d1pg0HW3fN /tmp/tmp.qLBWj3jvz9 ++++ return 0 +++ kubectl_bin exec -n monitoring-pmm3-23926 monitoring-server-0 -- pmm-admin --server-url=https://admin:admin@34.10.182.81/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.TgK20DtNbx ++++ mktemp +++ local LAST_ERR=/tmp/tmp.q3AwoDpF9F +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-pmm3-23926 monitoring-server-0 -- pmm-admin --server-url=https://admin:admin@34.10.182.81/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.TgK20DtNbx +++ cat /tmp/tmp.q3AwoDpF9F command terminated with exit code 1 +++ sleep 0 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-pmm3-23926 monitoring-server-0 -- pmm-admin --server-url=https://admin:admin@34.10.182.81/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.TgK20DtNbx +++ cat /tmp/tmp.q3AwoDpF9F command terminated with exit code 1 +++ sleep 4 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-pmm3-23926 monitoring-server-0 -- pmm-admin --server-url=https://admin:admin@34.10.182.81/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat 
/tmp/tmp.TgK20DtNbx +++ cat /tmp/tmp.q3AwoDpF9F command terminated with exit code 1 +++ sleep 8 +++ cat /tmp/tmp.TgK20DtNbx +++ cat /tmp/tmp.q3AwoDpF9F command terminated with exit code 1 +++ rm /tmp/tmp.TgK20DtNbx /tmp/tmp.q3AwoDpF9F +++ return 1 ++ for node_id in '"${nodeList[@]}"' ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-server-0 -- pmm-admin --server-url=https://admin:admin@$(get_pmm_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ awk '{print $4}' +++ grep ef4dec8a-9414-4232-be85-de1dfe22cddf ++++ get_pmm_service_ip monitoring-service ++++ local service=monitoring-service ++++ grep -q NotFound ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.spec.type}' ++++ egrep -q 'hostname|ip' ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[]}' ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.UShcW3J348 +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.SGKtriyQ2E ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.UShcW3J348 ++++ cat /tmp/tmp.SGKtriyQ2E ++++ rm /tmp/tmp.UShcW3J348 /tmp/tmp.SGKtriyQ2E ++++ return 0 ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.3VFWkTuWlH +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.WKG3Qoq1Kr ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.3VFWkTuWlH ++++ cat /tmp/tmp.WKG3Qoq1Kr ++++ rm /tmp/tmp.3VFWkTuWlH /tmp/tmp.WKG3Qoq1Kr ++++ return 0 +++ kubectl_bin exec -n monitoring-pmm3-23926 monitoring-server-0 -- pmm-admin --server-url=https://admin:admin@34.10.182.81/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.KciuEWvCKH ++++ mktemp +++ local LAST_ERR=/tmp/tmp.tXamrhsrda +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-pmm3-23926 monitoring-server-0 -- pmm-admin --server-url=https://admin:admin@34.10.182.81/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.KciuEWvCKH +++ cat /tmp/tmp.tXamrhsrda command terminated with exit code 1 +++ sleep 0 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-pmm3-23926 monitoring-server-0 -- pmm-admin --server-url=https://admin:admin@34.10.182.81/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.KciuEWvCKH +++ cat /tmp/tmp.tXamrhsrda command terminated with exit code 1 +++ sleep 4 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-pmm3-23926 monitoring-server-0 -- pmm-admin --server-url=https://admin:admin@34.10.182.81/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat 
/tmp/tmp.KciuEWvCKH +++ cat /tmp/tmp.tXamrhsrda command terminated with exit code 1 +++ sleep 8 +++ cat /tmp/tmp.KciuEWvCKH +++ cat /tmp/tmp.tXamrhsrda command terminated with exit code 1 +++ rm /tmp/tmp.KciuEWvCKH /tmp/tmp.tXamrhsrda +++ return 1 ++ for node_id in '"${nodeList[@]}"' ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-server-0 -- pmm-admin --server-url=https://admin:admin@$(get_pmm_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ grep 9b2902f2-f4ce-46dd-abbf-f9823c70ab74 ++++ get_pmm_service_ip monitoring-service ++++ local service=monitoring-service +++ awk '{print $4}' ++++ grep -q NotFound ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.spec.type}' ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[]}' ++++ egrep -q 'hostname|ip' ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.TgkzWm7Q4t +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.oYqKyWeJKa ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.TgkzWm7Q4t ++++ cat /tmp/tmp.oYqKyWeJKa ++++ rm /tmp/tmp.TgkzWm7Q4t /tmp/tmp.oYqKyWeJKa ++++ return 0 ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.75CDX61IaO +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.zgrtM6hzSS ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.75CDX61IaO ++++ cat /tmp/tmp.zgrtM6hzSS ++++ rm /tmp/tmp.75CDX61IaO /tmp/tmp.zgrtM6hzSS ++++ return 0 +++ kubectl_bin exec -n monitoring-pmm3-23926 monitoring-server-0 -- pmm-admin --server-url=https://admin:admin@34.10.182.81/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.yAm8b8qFxD ++++ mktemp +++ local LAST_ERR=/tmp/tmp.HzIaHFFcnN +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-pmm3-23926 monitoring-server-0 -- pmm-admin --server-url=https://admin:admin@34.10.182.81/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.yAm8b8qFxD +++ cat /tmp/tmp.HzIaHFFcnN command terminated with exit code 1 +++ sleep 0 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-pmm3-23926 monitoring-server-0 -- pmm-admin --server-url=https://admin:admin@34.10.182.81/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.yAm8b8qFxD +++ cat /tmp/tmp.HzIaHFFcnN command terminated with exit code 1 +++ sleep 4 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-pmm3-23926 monitoring-server-0 -- pmm-admin --server-url=https://admin:admin@34.10.182.81/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat 
/tmp/tmp.yAm8b8qFxD +++ cat /tmp/tmp.HzIaHFFcnN command terminated with exit code 1 +++ sleep 8 +++ cat /tmp/tmp.yAm8b8qFxD +++ cat /tmp/tmp.HzIaHFFcnN command terminated with exit code 1 +++ rm /tmp/tmp.yAm8b8qFxD /tmp/tmp.HzIaHFFcnN +++ return 1 ++ echo + kubectl_bin patch psmdb monitoring-pmm3 --type json '-p=[{"op":"add","path":"/spec/pause","value":true}]' ++ mktemp + local LAST_OUT=/tmp/tmp.tfTbtzezZG ++ mktemp + local LAST_ERR=/tmp/tmp.8kNhqT95sz + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl patch psmdb monitoring-pmm3 --type json '-p=[{"op":"add","path":"/spec/pause","value":true}]' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.tfTbtzezZG perconaservermongodb.psmdb.percona.com/monitoring-pmm3 patched + cat /tmp/tmp.8kNhqT95sz + rm /tmp/tmp.tfTbtzezZG /tmp/tmp.8kNhqT95sz + return 0 + wait_for_delete pod/monitoring-pmm3-mongos-0 + local res=pod/monitoring-pmm3-mongos-0 + local wait_time=60 + set +o xtrace waiting for pod/monitoring-pmm3-mongos-0 to be deleted....Error from server (NotFound): pods "monitoring-pmm3-mongos-0" not found Error from server (NotFound): pods "monitoring-pmm3-mongos-0" not found Error from server (NotFound): pods "monitoring-pmm3-mongos-0" not found Error from server (NotFound): pods "monitoring-pmm3-mongos-0" not found + wait_for_delete pod/monitoring-pmm3-rs0-0 + local res=pod/monitoring-pmm3-rs0-0 + local wait_time=60 + set +o xtrace waiting for pod/monitoring-pmm3-rs0-0 to be deletedError from server (NotFound): pods "monitoring-pmm3-rs0-0" not found Error from server (NotFound): pods "monitoring-pmm3-rs0-0" not found Error from server (NotFound): pods "monitoring-pmm3-rs0-0" not found Error from server (NotFound): pods "monitoring-pmm3-rs0-0" not found + wait_for_delete pod/monitoring-pmm3-cfg-0 + local res=pod/monitoring-pmm3-cfg-0 + local wait_time=60 + set +o xtrace waiting for pod/monitoring-pmm3-cfg-0 to be deletedError from server (NotFound): pods "monitoring-pmm3-cfg-0" not found Error from server (NotFound): pods "monitoring-pmm3-cfg-0" not found Error from server (NotFound): pods "monitoring-pmm3-cfg-0" not found Error from server (NotFound): pods "monitoring-pmm3-cfg-0" not found + desc 'check if services are not deleted' + set +o xtrace ----------------------------------------------------------------------------------- check if services are not deleted ----------------------------------------------------------------------------------- + kubectl_bin get svc monitoring-pmm3-rs0 ++ mktemp + local LAST_OUT=/tmp/tmp.6pKaRpRljT ++ mktemp + local LAST_ERR=/tmp/tmp.VON9grQm9I + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get svc monitoring-pmm3-rs0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.6pKaRpRljT NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE monitoring-pmm3-rs0 ClusterIP None 27019/TCP 39m + cat /tmp/tmp.VON9grQm9I + rm /tmp/tmp.6pKaRpRljT /tmp/tmp.VON9grQm9I + return 0 + kubectl_bin get svc monitoring-pmm3-cfg ++ mktemp + local LAST_OUT=/tmp/tmp.qucYJXla12 ++ mktemp + local LAST_ERR=/tmp/tmp.nzGh9gJSjv + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get svc monitoring-pmm3-cfg + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.qucYJXla12 NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE monitoring-pmm3-cfg ClusterIP None 27019/TCP 39m + cat /tmp/tmp.nzGh9gJSjv + rm /tmp/tmp.qucYJXla12 /tmp/tmp.nzGh9gJSjv + return 0 + kubectl_bin 
get svc monitoring-pmm3-mongos ++ mktemp + local LAST_OUT=/tmp/tmp.PWICyQNdCQ ++ mktemp + local LAST_ERR=/tmp/tmp.8DVMuKi57l + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get svc monitoring-pmm3-mongos + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.PWICyQNdCQ NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE monitoring-pmm3-mongos ClusterIP 34.118.229.1 27019/TCP 39m + cat /tmp/tmp.8DVMuKi57l + rm /tmp/tmp.PWICyQNdCQ /tmp/tmp.8DVMuKi57l + return 0 + does_node_id_exists_in_pmm=($(does_node_id_exists "${nodeList[@]}")) ++ does_node_id_exists 4ec4d0ab-e96f-4e52-bc6d-fbc1870d398c b0afb311-88be-46c3-aef1-aada1ec75263 d99c18ab-f306-4268-96c6-36259e69b028 da25b0f4-0b59-47b2-b185-f122adfc0c63 3b5891b0-102c-4773-9309-5d94af173180 34994576-0ccc-40a1-a96c-e60adc7cfbf8 15d57ce0-20d2-404d-8721-64b178b00166 ef4dec8a-9414-4232-be85-de1dfe22cddf 9b2902f2-f4ce-46dd-abbf-f9823c70ab74 ++ nodeList=("$@") ++ local -a nodeList ++ nodeList_from_pmm=() ++ local -a nodeList_from_pmm ++ for node_id in '"${nodeList[@]}"' ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-server-0 -- pmm-admin --server-url=https://admin:admin@$(get_pmm_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ grep 4ec4d0ab-e96f-4e52-bc6d-fbc1870d398c +++ awk '{print $4}' ++++ get_pmm_service_ip monitoring-service ++++ local service=monitoring-service ++++ grep -q NotFound ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.spec.type}' ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[]}' ++++ egrep -q 'hostname|ip' ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.pOQwrtwx4C +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.sKt1Ezjinw ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.pOQwrtwx4C ++++ cat /tmp/tmp.sKt1Ezjinw ++++ rm /tmp/tmp.pOQwrtwx4C /tmp/tmp.sKt1Ezjinw ++++ return 0 ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.bs0JYNOnXV +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.IhpKc5IXzp ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.bs0JYNOnXV ++++ cat /tmp/tmp.IhpKc5IXzp ++++ rm /tmp/tmp.bs0JYNOnXV /tmp/tmp.IhpKc5IXzp ++++ return 0 +++ kubectl_bin exec -n monitoring-pmm3-23926 monitoring-server-0 -- pmm-admin --server-url=https://admin:admin@34.10.182.81/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.Mw7QamwvOs ++++ mktemp +++ local LAST_ERR=/tmp/tmp.WNwhwFsrUb +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-pmm3-23926 monitoring-server-0 -- pmm-admin --server-url=https://admin:admin@34.10.182.81/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 
']' +++ cat /tmp/tmp.Mw7QamwvOs +++ cat /tmp/tmp.WNwhwFsrUb error: Internal error occurred: unable to upgrade connection: container not found ("pmm") +++ sleep 0 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-pmm3-23926 monitoring-server-0 -- pmm-admin --server-url=https://admin:admin@34.10.182.81/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.Mw7QamwvOs +++ cat /tmp/tmp.WNwhwFsrUb error: Internal error occurred: unable to upgrade connection: container not found ("pmm") +++ sleep 4 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-pmm3-23926 monitoring-server-0 -- pmm-admin --server-url=https://admin:admin@34.10.182.81/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.Mw7QamwvOs +++ cat /tmp/tmp.WNwhwFsrUb error: Internal error occurred: unable to upgrade connection: container not found ("pmm") +++ sleep 8 +++ cat /tmp/tmp.Mw7QamwvOs +++ cat /tmp/tmp.WNwhwFsrUb error: Internal error occurred: unable to upgrade connection: container not found ("pmm") +++ rm /tmp/tmp.Mw7QamwvOs /tmp/tmp.WNwhwFsrUb +++ return 1 ++ for node_id in '"${nodeList[@]}"' ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-server-0 -- pmm-admin --server-url=https://admin:admin@$(get_pmm_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) ++++ get_pmm_service_ip monitoring-service +++ grep b0afb311-88be-46c3-aef1-aada1ec75263 ++++ local service=monitoring-service ++++ grep -q NotFound ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.spec.type}' +++ awk '{print $4}' ++++ egrep -q 'hostname|ip' ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[]}' ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.NcXnvHeS1v +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.4vABvb0tV1 ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.NcXnvHeS1v ++++ cat /tmp/tmp.4vABvb0tV1 ++++ rm /tmp/tmp.NcXnvHeS1v /tmp/tmp.4vABvb0tV1 ++++ return 0 ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.S1a35G4Uq9 +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.Ga0Q0f2IZ9 ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.S1a35G4Uq9 ++++ cat /tmp/tmp.Ga0Q0f2IZ9 ++++ rm /tmp/tmp.S1a35G4Uq9 /tmp/tmp.Ga0Q0f2IZ9 ++++ return 0 +++ kubectl_bin exec -n monitoring-pmm3-23926 monitoring-server-0 -- pmm-admin --server-url=https://admin:admin@34.10.182.81/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.8P28p6hebo ++++ mktemp +++ local LAST_ERR=/tmp/tmp.7jq27aW5UQ +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n 
monitoring-pmm3-23926 monitoring-server-0 -- pmm-admin --server-url=https://admin:admin@34.10.182.81/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.8P28p6hebo +++ cat /tmp/tmp.7jq27aW5UQ command terminated with exit code 1 +++ sleep 0 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-pmm3-23926 monitoring-server-0 -- pmm-admin --server-url=https://admin:admin@34.10.182.81/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.8P28p6hebo +++ cat /tmp/tmp.7jq27aW5UQ command terminated with exit code 1 +++ sleep 4 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-pmm3-23926 monitoring-server-0 -- pmm-admin --server-url=https://admin:admin@34.10.182.81/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.8P28p6hebo +++ cat /tmp/tmp.7jq27aW5UQ command terminated with exit code 1 +++ sleep 8 +++ cat /tmp/tmp.8P28p6hebo +++ cat /tmp/tmp.7jq27aW5UQ command terminated with exit code 1 +++ rm /tmp/tmp.8P28p6hebo /tmp/tmp.7jq27aW5UQ +++ return 1 ++ for node_id in '"${nodeList[@]}"' ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-server-0 -- pmm-admin --server-url=https://admin:admin@$(get_pmm_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ awk '{print $4}' +++ grep d99c18ab-f306-4268-96c6-36259e69b028 ++++ get_pmm_service_ip monitoring-service ++++ local service=monitoring-service ++++ grep -q NotFound ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.spec.type}' ++++ egrep -q 'hostname|ip' ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[]}' ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.s1SJEttoMt +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.9q4joWwtCx ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.s1SJEttoMt ++++ cat /tmp/tmp.9q4joWwtCx ++++ rm /tmp/tmp.s1SJEttoMt /tmp/tmp.9q4joWwtCx ++++ return 0 ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.PZcfNoGcJC +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.68MZGExeB4 ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.PZcfNoGcJC ++++ cat /tmp/tmp.68MZGExeB4 ++++ rm /tmp/tmp.PZcfNoGcJC /tmp/tmp.68MZGExeB4 ++++ return 0 +++ kubectl_bin exec -n monitoring-pmm3-23926 monitoring-server-0 -- pmm-admin --server-url=https://admin:admin@34.10.182.81/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.RAE9F1FMrA ++++ mktemp +++ local LAST_ERR=/tmp/tmp.bWgNIUUyuB +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n 
monitoring-pmm3-23926 monitoring-server-0 -- pmm-admin --server-url=https://admin:admin@34.10.182.81/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.RAE9F1FMrA +++ cat /tmp/tmp.bWgNIUUyuB command terminated with exit code 1 +++ sleep 0 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-pmm3-23926 monitoring-server-0 -- pmm-admin --server-url=https://admin:admin@34.10.182.81/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.RAE9F1FMrA +++ cat /tmp/tmp.bWgNIUUyuB command terminated with exit code 1 +++ sleep 4 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-pmm3-23926 monitoring-server-0 -- pmm-admin --server-url=https://admin:admin@34.10.182.81/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.RAE9F1FMrA +++ cat /tmp/tmp.bWgNIUUyuB command terminated with exit code 1 +++ sleep 8 +++ cat /tmp/tmp.RAE9F1FMrA +++ cat /tmp/tmp.bWgNIUUyuB command terminated with exit code 1 +++ rm /tmp/tmp.RAE9F1FMrA /tmp/tmp.bWgNIUUyuB +++ return 1 ++ for node_id in '"${nodeList[@]}"' ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-server-0 -- pmm-admin --server-url=https://admin:admin@$(get_pmm_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ awk '{print $4}' +++ grep da25b0f4-0b59-47b2-b185-f122adfc0c63 ++++ get_pmm_service_ip monitoring-service ++++ local service=monitoring-service ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.spec.type}' ++++ grep -q NotFound ++++ egrep -q 'hostname|ip' ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[]}' ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.WkpUHb97WM +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.5oYnmIUVnA ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.WkpUHb97WM ++++ cat /tmp/tmp.5oYnmIUVnA ++++ rm /tmp/tmp.WkpUHb97WM /tmp/tmp.5oYnmIUVnA ++++ return 0 ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.VWRxonXasQ +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.CtEP2MPOTc ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.VWRxonXasQ ++++ cat /tmp/tmp.CtEP2MPOTc ++++ rm /tmp/tmp.VWRxonXasQ /tmp/tmp.CtEP2MPOTc ++++ return 0 +++ kubectl_bin exec -n monitoring-pmm3-23926 monitoring-server-0 -- pmm-admin --server-url=https://admin:admin@34.10.182.81/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.Gc20syRJxo ++++ mktemp +++ local LAST_ERR=/tmp/tmp.shrHeqpo6X +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n 
monitoring-pmm3-23926 monitoring-server-0 -- pmm-admin --server-url=https://admin:admin@34.10.182.81/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.Gc20syRJxo +++ cat /tmp/tmp.shrHeqpo6X command terminated with exit code 1 +++ sleep 0 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-pmm3-23926 monitoring-server-0 -- pmm-admin --server-url=https://admin:admin@34.10.182.81/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.Gc20syRJxo +++ cat /tmp/tmp.shrHeqpo6X command terminated with exit code 1 +++ sleep 4 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-pmm3-23926 monitoring-server-0 -- pmm-admin --server-url=https://admin:admin@34.10.182.81/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.Gc20syRJxo +++ cat /tmp/tmp.shrHeqpo6X command terminated with exit code 1 +++ sleep 8 +++ cat /tmp/tmp.Gc20syRJxo +++ cat /tmp/tmp.shrHeqpo6X command terminated with exit code 1 +++ rm /tmp/tmp.Gc20syRJxo /tmp/tmp.shrHeqpo6X +++ return 1 ++ for node_id in '"${nodeList[@]}"' ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-server-0 -- pmm-admin --server-url=https://admin:admin@$(get_pmm_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ grep 3b5891b0-102c-4773-9309-5d94af173180 +++ awk '{print $4}' ++++ get_pmm_service_ip monitoring-service ++++ local service=monitoring-service ++++ grep -q NotFound ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.spec.type}' ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[]}' ++++ egrep -q 'hostname|ip' ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.KsZefeiP5m +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.JalyScCdHW ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.KsZefeiP5m ++++ cat /tmp/tmp.JalyScCdHW ++++ rm /tmp/tmp.KsZefeiP5m /tmp/tmp.JalyScCdHW ++++ return 0 ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.1aPdNcwIcC +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.5XRKPRkP4G ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.1aPdNcwIcC ++++ cat /tmp/tmp.5XRKPRkP4G ++++ rm /tmp/tmp.1aPdNcwIcC /tmp/tmp.5XRKPRkP4G ++++ return 0 +++ kubectl_bin exec -n monitoring-pmm3-23926 monitoring-server-0 -- pmm-admin --server-url=https://admin:admin@34.10.182.81/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.oFzCOVJS2D ++++ mktemp +++ local LAST_ERR=/tmp/tmp.MEoYS8MH1L +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n 
monitoring-pmm3-23926 monitoring-server-0 -- pmm-admin --server-url=https://admin:admin@34.10.182.81/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.oFzCOVJS2D +++ cat /tmp/tmp.MEoYS8MH1L command terminated with exit code 1 +++ sleep 0 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-pmm3-23926 monitoring-server-0 -- pmm-admin --server-url=https://admin:admin@34.10.182.81/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.oFzCOVJS2D +++ cat /tmp/tmp.MEoYS8MH1L command terminated with exit code 1 +++ sleep 4 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-pmm3-23926 monitoring-server-0 -- pmm-admin --server-url=https://admin:admin@34.10.182.81/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.oFzCOVJS2D +++ cat /tmp/tmp.MEoYS8MH1L command terminated with exit code 1 +++ sleep 8 +++ cat /tmp/tmp.oFzCOVJS2D +++ cat /tmp/tmp.MEoYS8MH1L command terminated with exit code 1 +++ rm /tmp/tmp.oFzCOVJS2D /tmp/tmp.MEoYS8MH1L +++ return 1 ++ for node_id in '"${nodeList[@]}"' ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-server-0 -- pmm-admin --server-url=https://admin:admin@$(get_pmm_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ awk '{print $4}' +++ grep 34994576-0ccc-40a1-a96c-e60adc7cfbf8 ++++ get_pmm_service_ip monitoring-service ++++ local service=monitoring-service ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.spec.type}' ++++ grep -q NotFound ++++ egrep -q 'hostname|ip' ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[]}' ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.KxgPq4hfSF +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.jXh4akYpei ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.KxgPq4hfSF ++++ cat /tmp/tmp.jXh4akYpei ++++ rm /tmp/tmp.KxgPq4hfSF /tmp/tmp.jXh4akYpei ++++ return 0 ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.hyqLxw4A3v +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.Iew3l0xPDq ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.hyqLxw4A3v ++++ cat /tmp/tmp.Iew3l0xPDq ++++ rm /tmp/tmp.hyqLxw4A3v /tmp/tmp.Iew3l0xPDq ++++ return 0 +++ kubectl_bin exec -n monitoring-pmm3-23926 monitoring-server-0 -- pmm-admin --server-url=https://admin:admin@34.10.182.81/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.xRemIF6gLB ++++ mktemp +++ local LAST_ERR=/tmp/tmp.vSbXK9Aj0i +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n 
monitoring-pmm3-23926 monitoring-server-0 -- pmm-admin --server-url=https://admin:admin@34.10.182.81/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.xRemIF6gLB +++ cat /tmp/tmp.vSbXK9Aj0i command terminated with exit code 1 +++ sleep 0 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-pmm3-23926 monitoring-server-0 -- pmm-admin --server-url=https://admin:admin@34.10.182.81/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.xRemIF6gLB +++ cat /tmp/tmp.vSbXK9Aj0i command terminated with exit code 1 +++ sleep 4 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-pmm3-23926 monitoring-server-0 -- pmm-admin --server-url=https://admin:admin@34.10.182.81/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.xRemIF6gLB +++ cat /tmp/tmp.vSbXK9Aj0i command terminated with exit code 1 +++ sleep 8 +++ cat /tmp/tmp.xRemIF6gLB +++ cat /tmp/tmp.vSbXK9Aj0i command terminated with exit code 1 +++ rm /tmp/tmp.xRemIF6gLB /tmp/tmp.vSbXK9Aj0i +++ return 1 ++ for node_id in '"${nodeList[@]}"' ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-server-0 -- pmm-admin --server-url=https://admin:admin@$(get_pmm_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ grep 15d57ce0-20d2-404d-8721-64b178b00166 ++++ get_pmm_service_ip monitoring-service ++++ local service=monitoring-service +++ awk '{print $4}' ++++ grep -q NotFound ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.spec.type}' ++++ egrep -q 'hostname|ip' ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[]}' ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.tqfFJpKSyb +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.bZlPSB4e3U ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.tqfFJpKSyb ++++ cat /tmp/tmp.bZlPSB4e3U ++++ rm /tmp/tmp.tqfFJpKSyb /tmp/tmp.bZlPSB4e3U ++++ return 0 ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.Ym0vBcN0YG +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.XjD5ubtrhw ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.Ym0vBcN0YG ++++ cat /tmp/tmp.XjD5ubtrhw ++++ rm /tmp/tmp.Ym0vBcN0YG /tmp/tmp.XjD5ubtrhw ++++ return 0 +++ kubectl_bin exec -n monitoring-pmm3-23926 monitoring-server-0 -- pmm-admin --server-url=https://admin:admin@34.10.182.81/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.GybD90OV9E ++++ mktemp +++ local LAST_ERR=/tmp/tmp.N4FQZT2Qqf +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n 
monitoring-pmm3-23926 monitoring-server-0 -- pmm-admin --server-url=https://admin:admin@34.10.182.81/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.GybD90OV9E +++ cat /tmp/tmp.N4FQZT2Qqf command terminated with exit code 1 +++ sleep 0 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-pmm3-23926 monitoring-server-0 -- pmm-admin --server-url=https://admin:admin@34.10.182.81/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.GybD90OV9E +++ cat /tmp/tmp.N4FQZT2Qqf command terminated with exit code 1 +++ sleep 4 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-pmm3-23926 monitoring-server-0 -- pmm-admin --server-url=https://admin:admin@34.10.182.81/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.GybD90OV9E +++ cat /tmp/tmp.N4FQZT2Qqf command terminated with exit code 1 +++ sleep 8 +++ cat /tmp/tmp.GybD90OV9E +++ cat /tmp/tmp.N4FQZT2Qqf command terminated with exit code 1 +++ rm /tmp/tmp.GybD90OV9E /tmp/tmp.N4FQZT2Qqf +++ return 1 ++ for node_id in '"${nodeList[@]}"' ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-server-0 -- pmm-admin --server-url=https://admin:admin@$(get_pmm_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ grep ef4dec8a-9414-4232-be85-de1dfe22cddf +++ awk '{print $4}' ++++ get_pmm_service_ip monitoring-service ++++ local service=monitoring-service ++++ grep -q NotFound ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.spec.type}' ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[]}' ++++ egrep -q 'hostname|ip' ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.OmZ3tTGwdx +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.NjAKkzqWCQ ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.OmZ3tTGwdx ++++ cat /tmp/tmp.NjAKkzqWCQ ++++ rm /tmp/tmp.OmZ3tTGwdx /tmp/tmp.NjAKkzqWCQ ++++ return 0 ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.YqnI0KlfFO +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.cq5XTvMt2V ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.YqnI0KlfFO ++++ cat /tmp/tmp.cq5XTvMt2V ++++ rm /tmp/tmp.YqnI0KlfFO /tmp/tmp.cq5XTvMt2V ++++ return 0 +++ kubectl_bin exec -n monitoring-pmm3-23926 monitoring-server-0 -- pmm-admin --server-url=https://admin:admin@34.10.182.81/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.02quvLKxrk ++++ mktemp +++ local LAST_ERR=/tmp/tmp.41HNq49u0n +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n 
monitoring-pmm3-23926 monitoring-server-0 -- pmm-admin --server-url=https://admin:admin@34.10.182.81/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.02quvLKxrk +++ cat /tmp/tmp.41HNq49u0n command terminated with exit code 1 +++ sleep 0 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-pmm3-23926 monitoring-server-0 -- pmm-admin --server-url=https://admin:admin@34.10.182.81/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.02quvLKxrk +++ cat /tmp/tmp.41HNq49u0n command terminated with exit code 1 +++ sleep 4 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-pmm3-23926 monitoring-server-0 -- pmm-admin --server-url=https://admin:admin@34.10.182.81/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.02quvLKxrk +++ cat /tmp/tmp.41HNq49u0n command terminated with exit code 1 +++ sleep 8 +++ cat /tmp/tmp.02quvLKxrk +++ cat /tmp/tmp.41HNq49u0n command terminated with exit code 1 +++ rm /tmp/tmp.02quvLKxrk /tmp/tmp.41HNq49u0n +++ return 1 ++ for node_id in '"${nodeList[@]}"' ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-server-0 -- pmm-admin --server-url=https://admin:admin@$(get_pmm_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ awk '{print $4}' +++ grep 9b2902f2-f4ce-46dd-abbf-f9823c70ab74 ++++ get_pmm_service_ip monitoring-service ++++ local service=monitoring-service ++++ grep -q NotFound ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.spec.type}' ++++ egrep -q 'hostname|ip' ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[]}' ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.A9OJrzF2oj +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.1VTlvrh741 ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.A9OJrzF2oj ++++ cat /tmp/tmp.1VTlvrh741 ++++ rm /tmp/tmp.A9OJrzF2oj /tmp/tmp.1VTlvrh741 ++++ return 0 ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.maTO1wIVno +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.ziMHyEVZxI ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.maTO1wIVno ++++ cat /tmp/tmp.ziMHyEVZxI ++++ rm /tmp/tmp.maTO1wIVno /tmp/tmp.ziMHyEVZxI ++++ return 0 +++ kubectl_bin exec -n monitoring-pmm3-23926 monitoring-server-0 -- pmm-admin --server-url=https://admin:admin@34.10.182.81/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.kimKdhB4BS ++++ mktemp +++ local LAST_ERR=/tmp/tmp.mIgKb9G7Sr +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n 
monitoring-pmm3-23926 monitoring-server-0 -- pmm-admin --server-url=https://admin:admin@34.10.182.81/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.kimKdhB4BS +++ cat /tmp/tmp.mIgKb9G7Sr command terminated with exit code 1 +++ sleep 0 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-pmm3-23926 monitoring-server-0 -- pmm-admin --server-url=https://admin:admin@34.10.182.81/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.kimKdhB4BS +++ cat /tmp/tmp.mIgKb9G7Sr command terminated with exit code 1 +++ sleep 4 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-pmm3-23926 monitoring-server-0 -- pmm-admin --server-url=https://admin:admin@34.10.182.81/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.kimKdhB4BS +++ cat /tmp/tmp.mIgKb9G7Sr command terminated with exit code 1 +++ sleep 8 +++ cat /tmp/tmp.kimKdhB4BS +++ cat /tmp/tmp.mIgKb9G7Sr command terminated with exit code 1 +++ rm /tmp/tmp.kimKdhB4BS /tmp/tmp.mIgKb9G7Sr +++ return 1 ++ echo + [[ -n '' ]] ++ kubectl_bin logs monitoring-pmm3-rs0-0 pmm-client ++ grep -c 'cannot auto discover databases and collections' +++ mktemp ++ local LAST_OUT=/tmp/tmp.6vuAFDxmkD +++ mktemp ++ local LAST_ERR=/tmp/tmp.xuboOrect4 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl logs monitoring-pmm3-rs0-0 pmm-client ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.6vuAFDxmkD ++ cat /tmp/tmp.xuboOrect4 error: error from server (NotFound): pods "monitoring-pmm3-rs0-0" not found in namespace "monitoring-pmm3-23926" ++ sleep 0 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl logs monitoring-pmm3-rs0-0 pmm-client ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.6vuAFDxmkD ++ cat /tmp/tmp.xuboOrect4 error: error from server (NotFound): pods "monitoring-pmm3-rs0-0" not found in namespace "monitoring-pmm3-23926" ++ sleep 4 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl logs monitoring-pmm3-rs0-0 pmm-client ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.6vuAFDxmkD ++ cat /tmp/tmp.xuboOrect4 error: error from server (NotFound): pods "monitoring-pmm3-rs0-0" not found in namespace "monitoring-pmm3-23926" ++ sleep 8 ++ cat /tmp/tmp.6vuAFDxmkD ++ cat /tmp/tmp.xuboOrect4 error: error from server (NotFound): pods "monitoring-pmm3-rs0-0" not found in namespace "monitoring-pmm3-23926" ++ rm /tmp/tmp.6vuAFDxmkD /tmp/tmp.xuboOrect4 ++ return 1 + [[ 0 != 0 ]] + desc 'check for passwords leak' + set +o xtrace ----------------------------------------------------------------------------------- check for passwords leak ----------------------------------------------------------------------------------- + check_passwords_leak + local secrets + local passwords + local pods ++ jq -r '.items[].data | to_entries | .[] | select(.key | (contains("_PASSWORD"))) | .value' ++ kubectl_bin get secrets -o json +++ mktemp ++ local LAST_OUT=/tmp/tmp.G56A1SR1At +++ mktemp ++ local LAST_ERR=/tmp/tmp.3BxY6ySZin ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get secrets -o json ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.G56A1SR1At ++ cat /tmp/tmp.3BxY6ySZin ++ rm /tmp/tmp.G56A1SR1At 
/tmp/tmp.3BxY6ySZin ++ return 0 + secrets='YmFja3VwMTIzNDU2 YmFja3VwMTIzNDU2 Y2x1c3RlckFkbWluMTIzNDU2 Y2x1c3RlckFkbWluMTIzNDU2 Y2x1c3Rlck1vbml0b3IxMjM0NTY= Y2x1c3Rlck1vbml0b3IxMjM0NTY= ZGF0YWJhc2VBZG1pbjEyMzQ1Ng== ZGF0YWJhc2VBZG1pbjEyMzQ1Ng== dXNlckFkbWluMTIzNDU2 dXNlckFkbWluMTIzNDU2 KHo+Lj84NWcvSGl4UypdaA== YmFja3VwMTIzNDU2 Y2x1c3RlckFkbWluMTIzNDU2 Y2x1c3Rlck1vbml0b3IxMjM0NTY= ZGF0YWJhc2VBZG1pbjEyMzQ1Ng== dXNlckFkbWluMTIzNDU2' + echo secrets=YmFja3VwMTIzNDU2 YmFja3VwMTIzNDU2 Y2x1c3RlckFkbWluMTIzNDU2 Y2x1c3RlckFkbWluMTIzNDU2 Y2x1c3Rlck1vbml0b3IxMjM0NTY= Y2x1c3Rlck1vbml0b3IxMjM0NTY= ZGF0YWJhc2VBZG1pbjEyMzQ1Ng== ZGF0YWJhc2VBZG1pbjEyMzQ1Ng== dXNlckFkbWluMTIzNDU2 dXNlckFkbWluMTIzNDU2 KHo+Lj84NWcvSGl4UypdaA== YmFja3VwMTIzNDU2 Y2x1c3RlckFkbWluMTIzNDU2 Y2x1c3Rlck1vbml0b3IxMjM0NTY= ZGF0YWJhc2VBZG1pbjEyMzQ1Ng== dXNlckFkbWluMTIzNDU2 secrets=YmFja3VwMTIzNDU2 YmFja3VwMTIzNDU2 Y2x1c3RlckFkbWluMTIzNDU2 Y2x1c3RlckFkbWluMTIzNDU2 Y2x1c3Rlck1vbml0b3IxMjM0NTY= Y2x1c3Rlck1vbml0b3IxMjM0NTY= ZGF0YWJhc2VBZG1pbjEyMzQ1Ng== ZGF0YWJhc2VBZG1pbjEyMzQ1Ng== dXNlckFkbWluMTIzNDU2 dXNlckFkbWluMTIzNDU2 KHo+Lj84NWcvSGl4UypdaA== YmFja3VwMTIzNDU2 Y2x1c3RlckFkbWluMTIzNDU2 Y2x1c3Rlck1vbml0b3IxMjM0NTY= ZGF0YWJhc2VBZG1pbjEyMzQ1Ng== dXNlckFkbWluMTIzNDU2 ++ for i in '$secrets' ++ base64 -d ++ echo ++ for i in '$secrets' ++ base64 -d ++ echo ++ for i in '$secrets' ++ base64 -d ++ echo ++ for i in '$secrets' ++ base64 -d ++ echo ++ for i in '$secrets' ++ base64 -d ++ echo ++ for i in '$secrets' ++ base64 -d ++ echo ++ for i in '$secrets' ++ base64 -d ++ echo ++ for i in '$secrets' ++ base64 -d ++ echo ++ for i in '$secrets' ++ base64 -d ++ echo ++ for i in '$secrets' ++ base64 -d ++ echo ++ for i in '$secrets' ++ base64 -d ++ echo ++ for i in '$secrets' ++ base64 -d ++ echo ++ for i in '$secrets' ++ base64 -d ++ echo ++ for i in '$secrets' ++ base64 -d ++ echo ++ for i in '$secrets' ++ base64 -d ++ echo ++ for i in '$secrets' ++ base64 -d ++ echo + passwords='backup123456 backup123456 clusterAdmin123456 clusterAdmin123456 clusterMonitor123456 clusterMonitor123456 databaseAdmin123456 databaseAdmin123456 userAdmin123456 userAdmin123456 (z>.?85g/HixS*]h backup123456 clusterAdmin123456 clusterMonitor123456 databaseAdmin123456 userAdmin123456 YmFja3VwMTIzNDU2 YmFja3VwMTIzNDU2 Y2x1c3RlckFkbWluMTIzNDU2 Y2x1c3RlckFkbWluMTIzNDU2 Y2x1c3Rlck1vbml0b3IxMjM0NTY= Y2x1c3Rlck1vbml0b3IxMjM0NTY= ZGF0YWJhc2VBZG1pbjEyMzQ1Ng== ZGF0YWJhc2VBZG1pbjEyMzQ1Ng== dXNlckFkbWluMTIzNDU2 dXNlckFkbWluMTIzNDU2 KHo+Lj84NWcvSGl4UypdaA== YmFja3VwMTIzNDU2 Y2x1c3RlckFkbWluMTIzNDU2 Y2x1c3Rlck1vbml0b3IxMjM0NTY= ZGF0YWJhc2VBZG1pbjEyMzQ1Ng== dXNlckFkbWluMTIzNDU2' + echo passwords=backup123456 backup123456 clusterAdmin123456 clusterAdmin123456 clusterMonitor123456 clusterMonitor123456 databaseAdmin123456 databaseAdmin123456 userAdmin123456 userAdmin123456 '(z>.?85g/HixS*]h' backup123456 clusterAdmin123456 clusterMonitor123456 databaseAdmin123456 userAdmin123456 YmFja3VwMTIzNDU2 YmFja3VwMTIzNDU2 Y2x1c3RlckFkbWluMTIzNDU2 Y2x1c3RlckFkbWluMTIzNDU2 Y2x1c3Rlck1vbml0b3IxMjM0NTY= Y2x1c3Rlck1vbml0b3IxMjM0NTY= ZGF0YWJhc2VBZG1pbjEyMzQ1Ng== ZGF0YWJhc2VBZG1pbjEyMzQ1Ng== dXNlckFkbWluMTIzNDU2 dXNlckFkbWluMTIzNDU2 KHo+Lj84NWcvSGl4UypdaA== YmFja3VwMTIzNDU2 Y2x1c3RlckFkbWluMTIzNDU2 Y2x1c3Rlck1vbml0b3IxMjM0NTY= ZGF0YWJhc2VBZG1pbjEyMzQ1Ng== dXNlckFkbWluMTIzNDU2 passwords=backup123456 backup123456 clusterAdmin123456 clusterAdmin123456 clusterMonitor123456 clusterMonitor123456 databaseAdmin123456 databaseAdmin123456 userAdmin123456 userAdmin123456 (z>.?85g/HixS*]h backup123456 clusterAdmin123456 
clusterMonitor123456 databaseAdmin123456 userAdmin123456 YmFja3VwMTIzNDU2 YmFja3VwMTIzNDU2 Y2x1c3RlckFkbWluMTIzNDU2 Y2x1c3RlckFkbWluMTIzNDU2 Y2x1c3Rlck1vbml0b3IxMjM0NTY= Y2x1c3Rlck1vbml0b3IxMjM0NTY= ZGF0YWJhc2VBZG1pbjEyMzQ1Ng== ZGF0YWJhc2VBZG1pbjEyMzQ1Ng== dXNlckFkbWluMTIzNDU2 dXNlckFkbWluMTIzNDU2 KHo+Lj84NWcvSGl4UypdaA== YmFja3VwMTIzNDU2 Y2x1c3RlckFkbWluMTIzNDU2 Y2x1c3Rlck1vbml0b3IxMjM0NTY= ZGF0YWJhc2VBZG1pbjEyMzQ1Ng== dXNlckFkbWluMTIzNDU2 ++ awk -F / '{print $2}' ++ kubectl_bin get pods -o name +++ mktemp ++ local LAST_OUT=/tmp/tmp.CDzVyucKqD +++ mktemp ++ local LAST_ERR=/tmp/tmp.z2zuOaSsyz ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods -o name ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.CDzVyucKqD ++ cat /tmp/tmp.z2zuOaSsyz ++ rm /tmp/tmp.CDzVyucKqD /tmp/tmp.z2zuOaSsyz ++ return 0 + pods='monitoring-server-0 psmdb-client-f8b7b5fb5-dvqzl' + echo pods=monitoring-server-0 psmdb-client-f8b7b5fb5-dvqzl pods=monitoring-server-0 psmdb-client-f8b7b5fb5-dvqzl + collect_logs monitoring-pmm3-23926 + local containers + local count + NS=monitoring-pmm3-23926 + for p in '$pods' ++ kubectl_bin -n monitoring-pmm3-23926 get pod monitoring-server-0 -o 'jsonpath={.spec.containers[*].name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ssZs3Raqxa +++ mktemp ++ local LAST_ERR=/tmp/tmp.5CRJgMGFL1 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl -n monitoring-pmm3-23926 get pod monitoring-server-0 -o 'jsonpath={.spec.containers[*].name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.ssZs3Raqxa ++ cat /tmp/tmp.5CRJgMGFL1 ++ rm /tmp/tmp.ssZs3Raqxa /tmp/tmp.5CRJgMGFL1 ++ return 0 + containers=pmm + for c in '$containers' + [[ pmm =~ pmm ]] + continue + echo + for p in '$pods' ++ kubectl_bin -n monitoring-pmm3-23926 get pod psmdb-client-f8b7b5fb5-dvqzl -o 'jsonpath={.spec.containers[*].name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.d3SCtJEVNG +++ mktemp ++ local LAST_ERR=/tmp/tmp.6Kgo8902Ay ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl -n monitoring-pmm3-23926 get pod psmdb-client-f8b7b5fb5-dvqzl -o 'jsonpath={.spec.containers[*].name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.d3SCtJEVNG ++ cat /tmp/tmp.6Kgo8902Ay ++ rm /tmp/tmp.d3SCtJEVNG /tmp/tmp.6Kgo8902Ay ++ return 0 + containers=psmdb-client + for c in '$containers' + [[ psmdb-client =~ pmm ]] + kubectl_bin -n monitoring-pmm3-23926 logs psmdb-client-f8b7b5fb5-dvqzl -c psmdb-client ++ mktemp + local LAST_OUT=/tmp/tmp.LUBGVhQgwD ++ mktemp + local LAST_ERR=/tmp/tmp.qubQmG7DGl + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl -n monitoring-pmm3-23926 logs psmdb-client-f8b7b5fb5-dvqzl -c psmdb-client + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.LUBGVhQgwD + cat /tmp/tmp.qubQmG7DGl + rm /tmp/tmp.LUBGVhQgwD /tmp/tmp.qubQmG7DGl + return 0 + echo logs saved in: /tmp/tmp.NJNkwVxXky/logs_output-psmdb-client-f8b7b5fb5-dvqzl-psmdb-client.txt logs saved in: /tmp/tmp.NJNkwVxXky/logs_output-psmdb-client-f8b7b5fb5-dvqzl-psmdb-client.txt + for pass in '$passwords' ++ grep -c --fixed-strings -- backup123456 /tmp/tmp.NJNkwVxXky/logs_output-psmdb-client-f8b7b5fb5-dvqzl-psmdb-client.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- backup123456 
/tmp/tmp.NJNkwVxXky/logs_output-psmdb-client-f8b7b5fb5-dvqzl-psmdb-client.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- clusterAdmin123456 /tmp/tmp.NJNkwVxXky/logs_output-psmdb-client-f8b7b5fb5-dvqzl-psmdb-client.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- clusterAdmin123456 /tmp/tmp.NJNkwVxXky/logs_output-psmdb-client-f8b7b5fb5-dvqzl-psmdb-client.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- clusterMonitor123456 /tmp/tmp.NJNkwVxXky/logs_output-psmdb-client-f8b7b5fb5-dvqzl-psmdb-client.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- clusterMonitor123456 /tmp/tmp.NJNkwVxXky/logs_output-psmdb-client-f8b7b5fb5-dvqzl-psmdb-client.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- databaseAdmin123456 /tmp/tmp.NJNkwVxXky/logs_output-psmdb-client-f8b7b5fb5-dvqzl-psmdb-client.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- databaseAdmin123456 /tmp/tmp.NJNkwVxXky/logs_output-psmdb-client-f8b7b5fb5-dvqzl-psmdb-client.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- userAdmin123456 /tmp/tmp.NJNkwVxXky/logs_output-psmdb-client-f8b7b5fb5-dvqzl-psmdb-client.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- userAdmin123456 /tmp/tmp.NJNkwVxXky/logs_output-psmdb-client-f8b7b5fb5-dvqzl-psmdb-client.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- '(z>.?85g/HixS*]h' /tmp/tmp.NJNkwVxXky/logs_output-psmdb-client-f8b7b5fb5-dvqzl-psmdb-client.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- backup123456 /tmp/tmp.NJNkwVxXky/logs_output-psmdb-client-f8b7b5fb5-dvqzl-psmdb-client.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- clusterAdmin123456 /tmp/tmp.NJNkwVxXky/logs_output-psmdb-client-f8b7b5fb5-dvqzl-psmdb-client.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- clusterMonitor123456 /tmp/tmp.NJNkwVxXky/logs_output-psmdb-client-f8b7b5fb5-dvqzl-psmdb-client.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- databaseAdmin123456 /tmp/tmp.NJNkwVxXky/logs_output-psmdb-client-f8b7b5fb5-dvqzl-psmdb-client.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- userAdmin123456 /tmp/tmp.NJNkwVxXky/logs_output-psmdb-client-f8b7b5fb5-dvqzl-psmdb-client.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- YmFja3VwMTIzNDU2 /tmp/tmp.NJNkwVxXky/logs_output-psmdb-client-f8b7b5fb5-dvqzl-psmdb-client.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- YmFja3VwMTIzNDU2 /tmp/tmp.NJNkwVxXky/logs_output-psmdb-client-f8b7b5fb5-dvqzl-psmdb-client.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- Y2x1c3RlckFkbWluMTIzNDU2 /tmp/tmp.NJNkwVxXky/logs_output-psmdb-client-f8b7b5fb5-dvqzl-psmdb-client.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- Y2x1c3RlckFkbWluMTIzNDU2 /tmp/tmp.NJNkwVxXky/logs_output-psmdb-client-f8b7b5fb5-dvqzl-psmdb-client.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- Y2x1c3Rlck1vbml0b3IxMjM0NTY= 
/tmp/tmp.NJNkwVxXky/logs_output-psmdb-client-f8b7b5fb5-dvqzl-psmdb-client.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- Y2x1c3Rlck1vbml0b3IxMjM0NTY= /tmp/tmp.NJNkwVxXky/logs_output-psmdb-client-f8b7b5fb5-dvqzl-psmdb-client.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- ZGF0YWJhc2VBZG1pbjEyMzQ1Ng== /tmp/tmp.NJNkwVxXky/logs_output-psmdb-client-f8b7b5fb5-dvqzl-psmdb-client.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- ZGF0YWJhc2VBZG1pbjEyMzQ1Ng== /tmp/tmp.NJNkwVxXky/logs_output-psmdb-client-f8b7b5fb5-dvqzl-psmdb-client.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- dXNlckFkbWluMTIzNDU2 /tmp/tmp.NJNkwVxXky/logs_output-psmdb-client-f8b7b5fb5-dvqzl-psmdb-client.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- dXNlckFkbWluMTIzNDU2 /tmp/tmp.NJNkwVxXky/logs_output-psmdb-client-f8b7b5fb5-dvqzl-psmdb-client.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- KHo+Lj84NWcvSGl4UypdaA== /tmp/tmp.NJNkwVxXky/logs_output-psmdb-client-f8b7b5fb5-dvqzl-psmdb-client.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- YmFja3VwMTIzNDU2 /tmp/tmp.NJNkwVxXky/logs_output-psmdb-client-f8b7b5fb5-dvqzl-psmdb-client.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- Y2x1c3RlckFkbWluMTIzNDU2 /tmp/tmp.NJNkwVxXky/logs_output-psmdb-client-f8b7b5fb5-dvqzl-psmdb-client.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- Y2x1c3Rlck1vbml0b3IxMjM0NTY= /tmp/tmp.NJNkwVxXky/logs_output-psmdb-client-f8b7b5fb5-dvqzl-psmdb-client.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- ZGF0YWJhc2VBZG1pbjEyMzQ1Ng== /tmp/tmp.NJNkwVxXky/logs_output-psmdb-client-f8b7b5fb5-dvqzl-psmdb-client.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- dXNlckFkbWluMTIzNDU2 /tmp/tmp.NJNkwVxXky/logs_output-psmdb-client-f8b7b5fb5-dvqzl-psmdb-client.txt ++ : + count=0 + [[ 0 != 0 ]] + echo + '[' -n psmdb-operator ']' ++ awk -F / '{print $2}' ++ kubectl_bin -n psmdb-operator get pods -o name +++ mktemp ++ local LAST_OUT=/tmp/tmp.pQRjtv6hGA +++ mktemp ++ local LAST_ERR=/tmp/tmp.7h0LnMfNhH ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl -n psmdb-operator get pods -o name ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.pQRjtv6hGA ++ cat /tmp/tmp.7h0LnMfNhH ++ rm /tmp/tmp.pQRjtv6hGA /tmp/tmp.7h0LnMfNhH ++ return 0 + pods=percona-server-mongodb-operator-7dbb56857b-c9m4n + collect_logs psmdb-operator + local containers + local count + NS=psmdb-operator + for p in '$pods' ++ kubectl_bin -n psmdb-operator get pod percona-server-mongodb-operator-7dbb56857b-c9m4n -o 'jsonpath={.spec.containers[*].name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.L354FdlrkK +++ mktemp ++ local LAST_ERR=/tmp/tmp.yz8WXxgIsx ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl -n psmdb-operator get pod percona-server-mongodb-operator-7dbb56857b-c9m4n -o 'jsonpath={.spec.containers[*].name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.L354FdlrkK ++ cat /tmp/tmp.yz8WXxgIsx ++ rm /tmp/tmp.L354FdlrkK /tmp/tmp.yz8WXxgIsx ++ return 0 + containers=percona-server-mongodb-operator + for c in 
'$containers' + [[ percona-server-mongodb-operator =~ pmm ]] + kubectl_bin -n psmdb-operator logs percona-server-mongodb-operator-7dbb56857b-c9m4n -c percona-server-mongodb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.xq6bWSzFED ++ mktemp + local LAST_ERR=/tmp/tmp.Jc7uKTY0lb + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl -n psmdb-operator logs percona-server-mongodb-operator-7dbb56857b-c9m4n -c percona-server-mongodb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.xq6bWSzFED + cat /tmp/tmp.Jc7uKTY0lb + rm /tmp/tmp.xq6bWSzFED /tmp/tmp.Jc7uKTY0lb + return 0 + echo logs saved in: /tmp/tmp.NJNkwVxXky/logs_output-percona-server-mongodb-operator-7dbb56857b-c9m4n-percona-server-mongodb-operator.txt logs saved in: /tmp/tmp.NJNkwVxXky/logs_output-percona-server-mongodb-operator-7dbb56857b-c9m4n-percona-server-mongodb-operator.txt + for pass in '$passwords' ++ grep -c --fixed-strings -- backup123456 /tmp/tmp.NJNkwVxXky/logs_output-percona-server-mongodb-operator-7dbb56857b-c9m4n-percona-server-mongodb-operator.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- backup123456 /tmp/tmp.NJNkwVxXky/logs_output-percona-server-mongodb-operator-7dbb56857b-c9m4n-percona-server-mongodb-operator.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- clusterAdmin123456 /tmp/tmp.NJNkwVxXky/logs_output-percona-server-mongodb-operator-7dbb56857b-c9m4n-percona-server-mongodb-operator.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- clusterAdmin123456 /tmp/tmp.NJNkwVxXky/logs_output-percona-server-mongodb-operator-7dbb56857b-c9m4n-percona-server-mongodb-operator.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- clusterMonitor123456 /tmp/tmp.NJNkwVxXky/logs_output-percona-server-mongodb-operator-7dbb56857b-c9m4n-percona-server-mongodb-operator.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- clusterMonitor123456 /tmp/tmp.NJNkwVxXky/logs_output-percona-server-mongodb-operator-7dbb56857b-c9m4n-percona-server-mongodb-operator.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- databaseAdmin123456 /tmp/tmp.NJNkwVxXky/logs_output-percona-server-mongodb-operator-7dbb56857b-c9m4n-percona-server-mongodb-operator.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- databaseAdmin123456 /tmp/tmp.NJNkwVxXky/logs_output-percona-server-mongodb-operator-7dbb56857b-c9m4n-percona-server-mongodb-operator.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- userAdmin123456 /tmp/tmp.NJNkwVxXky/logs_output-percona-server-mongodb-operator-7dbb56857b-c9m4n-percona-server-mongodb-operator.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- userAdmin123456 /tmp/tmp.NJNkwVxXky/logs_output-percona-server-mongodb-operator-7dbb56857b-c9m4n-percona-server-mongodb-operator.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- '(z>.?85g/HixS*]h' /tmp/tmp.NJNkwVxXky/logs_output-percona-server-mongodb-operator-7dbb56857b-c9m4n-percona-server-mongodb-operator.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- backup123456 /tmp/tmp.NJNkwVxXky/logs_output-percona-server-mongodb-operator-7dbb56857b-c9m4n-percona-server-mongodb-operator.txt ++ : + 
count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- clusterAdmin123456 /tmp/tmp.NJNkwVxXky/logs_output-percona-server-mongodb-operator-7dbb56857b-c9m4n-percona-server-mongodb-operator.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- clusterMonitor123456 /tmp/tmp.NJNkwVxXky/logs_output-percona-server-mongodb-operator-7dbb56857b-c9m4n-percona-server-mongodb-operator.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- databaseAdmin123456 /tmp/tmp.NJNkwVxXky/logs_output-percona-server-mongodb-operator-7dbb56857b-c9m4n-percona-server-mongodb-operator.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- userAdmin123456 /tmp/tmp.NJNkwVxXky/logs_output-percona-server-mongodb-operator-7dbb56857b-c9m4n-percona-server-mongodb-operator.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- YmFja3VwMTIzNDU2 /tmp/tmp.NJNkwVxXky/logs_output-percona-server-mongodb-operator-7dbb56857b-c9m4n-percona-server-mongodb-operator.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- YmFja3VwMTIzNDU2 /tmp/tmp.NJNkwVxXky/logs_output-percona-server-mongodb-operator-7dbb56857b-c9m4n-percona-server-mongodb-operator.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- Y2x1c3RlckFkbWluMTIzNDU2 /tmp/tmp.NJNkwVxXky/logs_output-percona-server-mongodb-operator-7dbb56857b-c9m4n-percona-server-mongodb-operator.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- Y2x1c3RlckFkbWluMTIzNDU2 /tmp/tmp.NJNkwVxXky/logs_output-percona-server-mongodb-operator-7dbb56857b-c9m4n-percona-server-mongodb-operator.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- Y2x1c3Rlck1vbml0b3IxMjM0NTY= /tmp/tmp.NJNkwVxXky/logs_output-percona-server-mongodb-operator-7dbb56857b-c9m4n-percona-server-mongodb-operator.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- Y2x1c3Rlck1vbml0b3IxMjM0NTY= /tmp/tmp.NJNkwVxXky/logs_output-percona-server-mongodb-operator-7dbb56857b-c9m4n-percona-server-mongodb-operator.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- ZGF0YWJhc2VBZG1pbjEyMzQ1Ng== /tmp/tmp.NJNkwVxXky/logs_output-percona-server-mongodb-operator-7dbb56857b-c9m4n-percona-server-mongodb-operator.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- ZGF0YWJhc2VBZG1pbjEyMzQ1Ng== /tmp/tmp.NJNkwVxXky/logs_output-percona-server-mongodb-operator-7dbb56857b-c9m4n-percona-server-mongodb-operator.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- dXNlckFkbWluMTIzNDU2 /tmp/tmp.NJNkwVxXky/logs_output-percona-server-mongodb-operator-7dbb56857b-c9m4n-percona-server-mongodb-operator.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- dXNlckFkbWluMTIzNDU2 /tmp/tmp.NJNkwVxXky/logs_output-percona-server-mongodb-operator-7dbb56857b-c9m4n-percona-server-mongodb-operator.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- KHo+Lj84NWcvSGl4UypdaA== /tmp/tmp.NJNkwVxXky/logs_output-percona-server-mongodb-operator-7dbb56857b-c9m4n-percona-server-mongodb-operator.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- YmFja3VwMTIzNDU2 
/tmp/tmp.NJNkwVxXky/logs_output-percona-server-mongodb-operator-7dbb56857b-c9m4n-percona-server-mongodb-operator.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- Y2x1c3RlckFkbWluMTIzNDU2 /tmp/tmp.NJNkwVxXky/logs_output-percona-server-mongodb-operator-7dbb56857b-c9m4n-percona-server-mongodb-operator.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- Y2x1c3Rlck1vbml0b3IxMjM0NTY= /tmp/tmp.NJNkwVxXky/logs_output-percona-server-mongodb-operator-7dbb56857b-c9m4n-percona-server-mongodb-operator.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- ZGF0YWJhc2VBZG1pbjEyMzQ1Ng== /tmp/tmp.NJNkwVxXky/logs_output-percona-server-mongodb-operator-7dbb56857b-c9m4n-percona-server-mongodb-operator.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- dXNlckFkbWluMTIzNDU2 /tmp/tmp.NJNkwVxXky/logs_output-percona-server-mongodb-operator-7dbb56857b-c9m4n-percona-server-mongodb-operator.txt ++ : + count=0 + [[ 0 != 0 ]] + echo + helm uninstall monitoring release "monitoring" uninstalled + destroy monitoring-pmm3-23926 + local namespace=monitoring-pmm3-23926 + local ignore_logs=true + [[ 0 == 1 ]] + desc 'destroy cluster/operator and all other resources' + set +o xtrace ----------------------------------------------------------------------------------- destroy cluster/operator and all other resources ----------------------------------------------------------------------------------- + '[' true == false ']' + delete_backups + desc 'Delete psmdb-backup' + set +o xtrace ----------------------------------------------------------------------------------- Delete psmdb-backup ----------------------------------------------------------------------------------- ++ kubectl_bin get psmdb-backup --no-headers ++ wc -l +++ mktemp ++ local LAST_OUT=/tmp/tmp.C50wOQwxbJ +++ mktemp ++ local LAST_ERR=/tmp/tmp.NtKoZvRJuA ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb-backup --no-headers ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.C50wOQwxbJ ++ cat /tmp/tmp.NtKoZvRJuA No resources found in monitoring-pmm3-23926 namespace. 
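Note: the trace above is the credential-leak check of the collected pod logs: every file saved under /tmp/tmp.NJNkwVxXky is grepped for each test credential in both plaintext and base64 form, and any non-zero match count would fail the run (all counts are 0 here). A minimal bash sketch of that pattern follows; the helper name scan_log_for_secrets and the password list are illustrative assumptions, not the test suite's actual code.

    # Sketch (assumed helper): fail if any known test credential appears in a saved pod log.
    passwords='backup123456 clusterAdmin123456 clusterMonitor123456 databaseAdmin123456 userAdmin123456'

    scan_log_for_secrets() {
        local log_file=$1 pass needle count
        for pass in $passwords; do
            # check the plaintext value and its base64 form, as the trace does;
            # grep -c exits non-zero when nothing matches, so "|| :" keeps set -e happy
            for needle in "$pass" "$(printf '%s' "$pass" | base64)"; do
                count=$(grep -c --fixed-strings -- "$needle" "$log_file") || :
                if [[ ${count:-0} != 0 ]]; then
                    echo "ERROR: credential found in $log_file"
                    return 1
                fi
            done
        done
    }

    # usage: scan_log_for_secrets /tmp/tmp.NJNkwVxXky/logs_output-<pod>-<container>.txt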
++ rm /tmp/tmp.C50wOQwxbJ /tmp/tmp.NtKoZvRJuA ++ return 0 + '[' 0 '!=' 0 ']' + delete_crd + desc 'get and delete old CRDs and RBAC' + set +o xtrace ----------------------------------------------------------------------------------- get and delete old CRDs and RBAC ----------------------------------------------------------------------------------- + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1993/deploy/crd.yaml --ignore-not-found --wait=false ++ mktemp + local LAST_OUT=/tmp/tmp.Nr9GDj4QjH ++ mktemp + local LAST_ERR=/tmp/tmp.VPmwD7Kwut + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1993/deploy/crd.yaml --ignore-not-found --wait=false + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Nr9GDj4QjH customresourcedefinition.apiextensions.k8s.io "perconaservermongodbbackups.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbrestores.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbs.psmdb.percona.com" deleted + cat /tmp/tmp.VPmwD7Kwut + rm /tmp/tmp.Nr9GDj4QjH /tmp/tmp.VPmwD7Kwut + return 0 ++ grep -v '\-\-\-' ++ yq eval .metadata.name /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1993/deploy/crd.yaml + for crd_name in '$(yq eval '\''.metadata.name'\'' "${src_dir}/deploy/crd.yaml" | grep -v '\''\-\-\-'\'')' + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbbackups.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' + kubectl get perconaservermongodbbackups.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE error: the server doesn't have a resource type "perconaservermongodbbackups" + kubectl patch perconaservermongodbbackups.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbbackups" + : + kubectl_bin wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.yrwqAADQM3 ++ mktemp + local LAST_ERR=/tmp/tmp.1ARIjU7W0q + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.yrwqAADQM3 + cat /tmp/tmp.1ARIjU7W0q + rm /tmp/tmp.yrwqAADQM3 /tmp/tmp.1ARIjU7W0q + return 0 + for crd_name in '$(yq eval '\''.metadata.name'\'' "${src_dir}/deploy/crd.yaml" | grep -v '\''\-\-\-'\'')' + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbrestores.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' + grep -v NAMESPACE + kubectl get perconaservermongodbrestores.psmdb.percona.com --all-namespaces -o wide error: the server doesn't have a resource type "perconaservermongodbrestores" + kubectl patch perconaservermongodbrestores.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbrestores" + : + kubectl_bin wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.KUOKWhl9tY ++ mktemp + local LAST_ERR=/tmp/tmp.TInkHyTury + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.KUOKWhl9tY 
+ cat /tmp/tmp.TInkHyTury + rm /tmp/tmp.KUOKWhl9tY /tmp/tmp.TInkHyTury + return 0 + for crd_name in '$(yq eval '\''.metadata.name'\'' "${src_dir}/deploy/crd.yaml" | grep -v '\''\-\-\-'\'')' + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbs.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' + kubectl get perconaservermongodbs.psmdb.percona.com --all-namespaces -o wide error: the server doesn't have a resource type "perconaservermongodbs" + kubectl patch perconaservermongodbs.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbs" + : + kubectl_bin wait --for=delete crd perconaservermongodbs.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.L5KCLUg84P ++ mktemp + local LAST_ERR=/tmp/tmp.xtYAuseKba + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=delete crd perconaservermongodbs.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.L5KCLUg84P + cat /tmp/tmp.xtYAuseKba + rm /tmp/tmp.L5KCLUg84P /tmp/tmp.xtYAuseKba + return 0 + local rbac_yaml=rbac.yaml + '[' -n psmdb-operator ']' + rbac_yaml=cw-rbac.yaml + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1993/deploy/cw-rbac.yaml --ignore-not-found ++ mktemp + local LAST_OUT=/tmp/tmp.oLXQiL3aJV ++ mktemp + local LAST_ERR=/tmp/tmp.yoN6Qp6o22 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1993/deploy/cw-rbac.yaml --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.oLXQiL3aJV clusterrole.rbac.authorization.k8s.io "percona-server-mongodb-operator" deleted clusterrolebinding.rbac.authorization.k8s.io "service-account-percona-server-mongodb-operator" deleted + cat /tmp/tmp.yoN6Qp6o22 + rm /tmp/tmp.oLXQiL3aJV /tmp/tmp.yoN6Qp6o22 + return 0 + destroy_cert_manager + kubectl_bin delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.p34DIWrLgS ++ mktemp + local LAST_ERR=/tmp/tmp.38Agak26U3 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.p34DIWrLgS namespace "cert-manager" deleted customresourcedefinition.apiextensions.k8s.io "certificaterequests.cert-manager.io" deleted customresourcedefinition.apiextensions.k8s.io "certificates.cert-manager.io" deleted customresourcedefinition.apiextensions.k8s.io "challenges.acme.cert-manager.io" deleted customresourcedefinition.apiextensions.k8s.io "clusterissuers.cert-manager.io" deleted customresourcedefinition.apiextensions.k8s.io "issuers.cert-manager.io" deleted customresourcedefinition.apiextensions.k8s.io "orders.acme.cert-manager.io" deleted serviceaccount "cert-manager-cainjector" deleted serviceaccount "cert-manager" deleted serviceaccount "cert-manager-webhook" deleted clusterrole.rbac.authorization.k8s.io "cert-manager-cainjector" deleted clusterrole.rbac.authorization.k8s.io "cert-manager-controller-issuers" deleted clusterrole.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" deleted clusterrole.rbac.authorization.k8s.io "cert-manager-controller-certificates" deleted 
clusterrole.rbac.authorization.k8s.io "cert-manager-controller-orders" deleted clusterrole.rbac.authorization.k8s.io "cert-manager-controller-challenges" deleted clusterrole.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" deleted clusterrole.rbac.authorization.k8s.io "cert-manager-cluster-view" deleted clusterrole.rbac.authorization.k8s.io "cert-manager-view" deleted clusterrole.rbac.authorization.k8s.io "cert-manager-edit" deleted clusterrole.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" deleted clusterrole.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" deleted clusterrole.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" deleted clusterrolebinding.rbac.authorization.k8s.io "cert-manager-cainjector" deleted clusterrolebinding.rbac.authorization.k8s.io "cert-manager-controller-issuers" deleted clusterrolebinding.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" deleted clusterrolebinding.rbac.authorization.k8s.io "cert-manager-controller-certificates" deleted clusterrolebinding.rbac.authorization.k8s.io "cert-manager-controller-orders" deleted clusterrolebinding.rbac.authorization.k8s.io "cert-manager-controller-challenges" deleted clusterrolebinding.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" deleted clusterrolebinding.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" deleted clusterrolebinding.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" deleted clusterrolebinding.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" deleted role.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" deleted role.rbac.authorization.k8s.io "cert-manager:leaderelection" deleted rolebinding.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" deleted rolebinding.rbac.authorization.k8s.io "cert-manager:leaderelection" deleted mutatingwebhookconfiguration.admissionregistration.k8s.io "cert-manager-webhook" deleted validatingwebhookconfiguration.admissionregistration.k8s.io "cert-manager-webhook" deleted + cat /tmp/tmp.38Agak26U3 Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": services "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": services "cert-manager" not found Error from server (NotFound): error when deleting 
"https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": services "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": deployments.apps "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": deployments.apps "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": deployments.apps "cert-manager-webhook" not found + sleep 0 + for i in '$(seq 0 2)' + set +e + kubectl delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.p34DIWrLgS + cat /tmp/tmp.38Agak26U3 Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": namespaces "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificaterequests.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificates.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "challenges.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "clusterissuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "issuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "orders.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": serviceaccounts "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": serviceaccounts "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": serviceaccounts "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server 
(NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cluster-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-edit" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting 
"https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": services "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": services "cert-manager" not found Error 
from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": services "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": deployments.apps "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": deployments.apps "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": deployments.apps "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": mutatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": validatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found + sleep 4 + for i in '$(seq 0 2)' + set +e + kubectl delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.p34DIWrLgS + cat /tmp/tmp.38Agak26U3 Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": namespaces "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificaterequests.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificates.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "challenges.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "clusterissuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "issuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "orders.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": serviceaccounts "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": serviceaccounts "cert-manager" not found Error from server (NotFound): error when deleting 
"https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": serviceaccounts "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cluster-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-edit" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from 
server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting 
"https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": services "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": services "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": services "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": deployments.apps "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": deployments.apps "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": deployments.apps "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": mutatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": validatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found + sleep 8 + cat /tmp/tmp.p34DIWrLgS + cat /tmp/tmp.38Agak26U3 Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": namespaces "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificaterequests.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificates.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "challenges.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "clusterissuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "issuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "orders.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": serviceaccounts 
"cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": serviceaccounts "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": serviceaccounts "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cluster-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-edit" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io 
"cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error 
when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": services "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": services "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": services "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": deployments.apps "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": deployments.apps "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": deployments.apps "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": mutatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": validatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found + rm /tmp/tmp.p34DIWrLgS /tmp/tmp.38Agak26U3 + return 1 + true + '[' -n '' ']' + '[' -n psmdb-operator ']' + rm -rf /tmp/tmp.NJNkwVxXky + kubectl_bin delete --grace-period=0 --force=true namespace psmdb-operator + kubectl_bin delete --grace-period=0 --force=true namespace monitoring-pmm3-23926 ++ mktemp + local LAST_OUT=/tmp/tmp.XWlf9r3YZ4 ++ mktemp ++ mktemp + local LAST_ERR=/tmp/tmp.Oqrh84DrAe + local exit_status=0 + local timeout=4 + desc 'test passed' + set +o xtrace ----------------------------------------------------------------------------------- test passed ----------------------------------------------------------------------------------- ++ seq 0 2 + local LAST_OUT=/tmp/tmp.w4uO2ESgBF + for i in '$(seq 0 2)' + set +e + kubectl delete --grace-period=0 --force=true namespace monitoring-pmm3-23926 ++ mktemp + local LAST_ERR=/tmp/tmp.HYlVM63exf + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete --grace-period=0 --force=true namespace psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.w4uO2ESgBF namespace "psmdb-operator" force deleted + cat /tmp/tmp.HYlVM63exf Warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely. 
+ rm /tmp/tmp.w4uO2ESgBF /tmp/tmp.HYlVM63exf + return 0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.XWlf9r3YZ4 namespace "monitoring-pmm3-23926" force deleted + cat /tmp/tmp.Oqrh84DrAe Warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely. + rm /tmp/tmp.XWlf9r3YZ4 /tmp/tmp.Oqrh84DrAe + return 0
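Note: the destroy sequence above tears down everything the test created: it strips finalizers from any leftover psmdb custom resources, deletes the CRDs and cluster-wide RBAC, deletes the cert-manager manifest (the repeated NotFound errors on the second and third attempts are expected, since the first pass already removed most objects; the wrapper returns 1 and the script continues), and finally force-deletes the psmdb-operator and monitoring-pmm3-23926 namespaces. A condensed bash sketch of the CRD teardown pattern follows; the src_dir path is an assumption, and the kubectl_bin retry wrapper (three attempts with growing sleeps, visible throughout the trace) is omitted for brevity.

    # Sketch: remove psmdb CRDs without hanging on finalizers.
    src_dir=/path/to/percona-server-mongodb-operator   # assumed checkout location

    kubectl delete -f "${src_dir}/deploy/crd.yaml" --ignore-not-found --wait=false

    for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v -- '---'); do
        # clear finalizers on any leftover custom resources of this CRD so the
        # CRD deletion cannot block; errors are tolerated when the resource
        # type is already gone, as in this log
        kubectl get "$crd_name" --all-namespaces -o wide \
            | grep -v NAMESPACE \
            | xargs -L 1 sh -xc "kubectl patch $crd_name -n \$0 \$1 --type=merge -p '{\"metadata\":{\"finalizers\":[]}}'" || :
        kubectl wait --for=delete crd "$crd_name" || :
    done

    # RBAC and the test namespaces go last; --force does not wait for
    # termination, which is why kubectl prints the warning seen above
    kubectl delete -f "${src_dir}/deploy/cw-rbac.yaml" --ignore-not-found
    kubectl delete --grace-period=0 --force=true namespace psmdb-operator
    kubectl delete --grace-period=0 --force=true namespace monitoring-pmm3-23926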