Log: /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1850/e2e-tests/logs/monitoring-pmm3.log grep: warning: stray \ before - Warning: version difference between client (1.34) and server (1.31) exceeds the supported minor version skew of +/-1 Warning: version difference between client (1.34) and server (1.31) exceeds the supported minor version skew of +/-1 Warning: version difference between client (1.34) and server (1.31) exceeds the supported minor version skew of +/-1 + create_infra monitoring-pmm3-8293 + local ns=monitoring-pmm3-8293 + [[ 1 == 1 ]] + delete_crd + desc 'get and delete old CRDs and RBAC' + set +o xtrace ----------------------------------------------------------------------------------- get and delete old CRDs and RBAC ----------------------------------------------------------------------------------- + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1850/deploy/crd.yaml --ignore-not-found --wait=false ++ mktemp + local LAST_OUT=/tmp/tmp.VjnYadj7Co ++ mktemp + local LAST_ERR=/tmp/tmp.6tSZDxPWd5 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1850/deploy/crd.yaml --ignore-not-found --wait=false + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.VjnYadj7Co customresourcedefinition.apiextensions.k8s.io "perconaservermongodbbackups.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbrestores.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbs.psmdb.percona.com" deleted + cat /tmp/tmp.6tSZDxPWd5 + rm /tmp/tmp.VjnYadj7Co /tmp/tmp.6tSZDxPWd5 + return 0 ++ yq eval .metadata.name /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1850/deploy/crd.yaml ++ grep -v '\-\-\-' grep: warning: stray \ before - grep: warning: stray \ before - + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + kubectl get perconaservermongodbbackups.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbbackups.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbbackups" + kubectl patch perconaservermongodbbackups.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbbackups" + : + kubectl_bin wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.dbM6u1DomY ++ mktemp + local LAST_ERR=/tmp/tmp.P5U667YMT5 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.dbM6u1DomY + cat /tmp/tmp.P5U667YMT5 + rm /tmp/tmp.dbM6u1DomY /tmp/tmp.P5U667YMT5 + return 0 + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + kubectl get perconaservermongodbrestores.psmdb.percona.com --all-namespaces -o wide + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbrestores.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' + grep -v NAMESPACE error: the server doesn't have a resource type "perconaservermongodbrestores" + kubectl patch perconaservermongodbrestores.psmdb.percona.com -n sh --type=merge -p 
'{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbrestores" + : + kubectl_bin wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.oYQNPPCmkN ++ mktemp + local LAST_ERR=/tmp/tmp.qoaklvc6bV + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.oYQNPPCmkN + cat /tmp/tmp.qoaklvc6bV + rm /tmp/tmp.oYQNPPCmkN /tmp/tmp.qoaklvc6bV + return 0 + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + kubectl get perconaservermongodbs.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbs.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' No resources found + kubectl patch perconaservermongodbs.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: resource(s) were provided, but no name was specified + : + kubectl_bin wait --for=delete crd perconaservermongodbs.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.gcW4zIpRxw ++ mktemp + local LAST_ERR=/tmp/tmp.UypsIDqwtI + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbs.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.gcW4zIpRxw customresourcedefinition.apiextensions.k8s.io/perconaservermongodbs.psmdb.percona.com condition met + cat /tmp/tmp.UypsIDqwtI + rm /tmp/tmp.gcW4zIpRxw /tmp/tmp.UypsIDqwtI + return 0 + local rbac_yaml=rbac.yaml + '[' -n psmdb-operator ']' + rbac_yaml=cw-rbac.yaml + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1850/deploy/cw-rbac.yaml --ignore-not-found ++ mktemp + local LAST_OUT=/tmp/tmp.PBeI1PmYuo ++ mktemp + local LAST_ERR=/tmp/tmp.wSuAwnEZye + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1850/deploy/cw-rbac.yaml --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.PBeI1PmYuo clusterrole.rbac.authorization.k8s.io "percona-server-mongodb-operator" deleted clusterrolebinding.rbac.authorization.k8s.io "service-account-percona-server-mongodb-operator" deleted + cat /tmp/tmp.wSuAwnEZye + rm /tmp/tmp.PBeI1PmYuo /tmp/tmp.wSuAwnEZye + return 0 + check_crd_for_deletion PR-1850-4eb33c10 + local git_tag=PR-1850-4eb33c10 ++ curl -s https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/PR-1850-4eb33c10/deploy/crd.yaml ++ yq eval .metadata.name ++ /usr/sbin/sed s/---//g ++ /usr/sbin/sed ':a;N;$!ba;s/\n/ /g' + for crd_name in $(curl -s https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/${git_tag}/deploy/crd.yaml | yq eval '.metadata.name' | $sed 's/---//g' | $sed ':a;N;$!ba;s/\n/ /g') ++ kubectl_bin get crd/null -o 'jsonpath={.status.conditions[-1].type}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.khQlDY8Ncd +++ mktemp ++ local LAST_ERR=/tmp/tmp.VoIcwzkxtX ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}' ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.khQlDY8Ncd ++ cat /tmp/tmp.VoIcwzkxtX Error from server (NotFound): 
customresourcedefinitions.apiextensions.k8s.io "null" not found ++ sleep 0 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}' ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.khQlDY8Ncd ++ cat /tmp/tmp.VoIcwzkxtX Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ sleep 4 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}' ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.khQlDY8Ncd ++ cat /tmp/tmp.VoIcwzkxtX Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ sleep 8 ++ cat /tmp/tmp.khQlDY8Ncd ++ cat /tmp/tmp.VoIcwzkxtX Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ rm /tmp/tmp.khQlDY8Ncd /tmp/tmp.VoIcwzkxtX ++ return 1 + [[ '' == \T\e\r\m\i\n\a\t\i\n\g ]] + '[' -n psmdb-operator ']' + create_namespace psmdb-operator + local namespace=psmdb-operator + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + destroy_chaos_mesh ++ helm list --all-namespaces --filter chaos-mesh ++ tail -n1 ++ awk '-F ' '{print $2}' ++ sed s/NAMESPACE// + local chaos_mesh_ns= + desc 'destroy chaos-mesh' + set +o xtrace ----------------------------------------------------------------------------------- destroy chaos-mesh ----------------------------------------------------------------------------------- + '[' -n '' ']' ++ kubectl get MutatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete MutatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep validate-auth ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl api-resources ++ grep chaos-mesh ++ awk '{print $1}' ++ kubectl get crd ++ grep chaos-mesh.org ++ awk '{print $1}' + timeout 30 kubectl delete crd error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrolebinding ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrolebinding error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrole ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrole error: resource(s) were provided, but no name was specified + : + desc 'cleaned up all old namespaces' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up all old namespaces ----------------------------------------------------------------------------------- + kubectl_bin get ns + awk '{print$1}' + '[' -n '' ']' + desc 'cleaned up old namespaces psmdb-operator' + egrep -v '^kube-|^default|Terminating|psmdb-operator|openshift|^gke-|^gmp-|^NAME' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up old namespaces psmdb-operator ----------------------------------------------------------------------------------- + kubectl_bin delete namespace psmdb-operator --ignore-not-found + xargs kubectl delete ns ++ mktemp ++ mktemp egrep: warning: egrep is obsolescent; using grep -E + local 
LAST_OUT=/tmp/tmp.fK6yhjDyAR ++ mktemp + local LAST_OUT=/tmp/tmp.avEuuHXMG9 + local LAST_ERR=/tmp/tmp.CTBlsa0L1j + local exit_status=0 + local timeout=4 ++ mktemp ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get ns + local LAST_ERR=/tmp/tmp.Uy6LGrfgjP + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete namespace psmdb-operator --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.fK6yhjDyAR + cat /tmp/tmp.CTBlsa0L1j + rm /tmp/tmp.fK6yhjDyAR /tmp/tmp.CTBlsa0L1j + return 0 namespace "cert-manager" deleted namespace "monitoring-pmm3-26668" deleted + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.avEuuHXMG9 namespace "psmdb-operator" deleted + cat /tmp/tmp.Uy6LGrfgjP + rm /tmp/tmp.avEuuHXMG9 /tmp/tmp.Uy6LGrfgjP + return 0 + kubectl_bin wait --for=delete namespace psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.bzsRkcDNnk ++ mktemp + local LAST_ERR=/tmp/tmp.8Dz1wcAdvM + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete namespace psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.bzsRkcDNnk + cat /tmp/tmp.8Dz1wcAdvM + rm /tmp/tmp.bzsRkcDNnk /tmp/tmp.8Dz1wcAdvM + return 0 + desc 'create namespace psmdb-operator' + set +o xtrace ----------------------------------------------------------------------------------- create namespace psmdb-operator ----------------------------------------------------------------------------------- + kubectl_bin create namespace psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.mGV7zyfKdE ++ mktemp + local LAST_ERR=/tmp/tmp.pK9YdoWGWz + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl create namespace psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.mGV7zyfKdE namespace/psmdb-operator created + cat /tmp/tmp.pK9YdoWGWz + rm /tmp/tmp.mGV7zyfKdE /tmp/tmp.pK9YdoWGWz + return 0 + set_kube_ctx psmdb-operator + local namespace=psmdb-operator ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.XJ5gJJ8CUl +++ mktemp ++ local LAST_ERR=/tmp/tmp.vcx2mpeDmo ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.XJ5gJJ8CUl ++ cat /tmp/tmp.vcx2mpeDmo ++ rm /tmp/tmp.XJ5gJJ8CUl /tmp/tmp.vcx2mpeDmo ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-1850-4eb33c10-13-cluster1 --namespace=psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.gwCytZT91K ++ mktemp + local LAST_ERR=/tmp/tmp.DKFecm7Xzx + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-1850-4eb33c10-13-cluster1 --namespace=psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.gwCytZT91K Context "gke_cloud-dev-112233_us-central1-a_jen-psmdb-1850-4eb33c10-13-cluster1" modified. 
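Note on the trace above and below: every kubectl_bin invocation in this log follows the same retry-wrapper pattern (capture stdout/stderr to mktemp files, try up to three times with 0s/4s/8s back-off, dump the captured output, clean up, return the last exit status). A minimal sketch of that pattern, reconstructed from the xtrace alone; the real helper in the test suite may differ in details such as how it reports errors inside the loop:

kubectl_bin() {
    # Reconstructed sketch: run "kubectl $@" up to 3 times, capturing output to
    # temp files and sleeping 0s, 4s, 8s between attempts as seen in the trace.
    local LAST_OUT LAST_ERR exit_status=0 timeout=4 i
    LAST_OUT=$(mktemp)
    LAST_ERR=$(mktemp)
    for i in $(seq 0 2); do
        set +e
        kubectl "$@" >"$LAST_OUT" 2>"$LAST_ERR"
        exit_status=$?
        set -e
        [ "$exit_status" -eq 0 ] && break
        sleep $((timeout * i))   # 0, 4, 8 seconds of back-off
    done
    cat "$LAST_OUT"
    cat "$LAST_ERR"
    rm -f "$LAST_OUT" "$LAST_ERR"
    return "$exit_status"
}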
+ cat /tmp/tmp.DKFecm7Xzx + rm /tmp/tmp.gwCytZT91K /tmp/tmp.DKFecm7Xzx + return 0 + deploy_operator + desc 'start PSMDB operator: perconalab/percona-server-mongodb-operator:PR-1850-4eb33c10' + set +o xtrace ----------------------------------------------------------------------------------- start PSMDB operator: perconalab/percona-server-mongodb-operator:PR-1850-4eb33c10 ----------------------------------------------------------------------------------- + local cr_file + '[' -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1850/e2e-tests/monitoring-pmm3/conf/crd.yaml ']' + cr_file=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1850/deploy/crd.yaml + kubectl_bin apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1850/deploy/crd.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.TRnhqnNxDx ++ mktemp + local LAST_ERR=/tmp/tmp.mnq8elurml + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1850/deploy/crd.yaml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.TRnhqnNxDx customresourcedefinition.apiextensions.k8s.io/perconaservermongodbbackups.psmdb.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaservermongodbrestores.psmdb.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaservermongodbs.psmdb.percona.com serverside-applied + cat /tmp/tmp.mnq8elurml + rm /tmp/tmp.TRnhqnNxDx /tmp/tmp.mnq8elurml + return 0 + '[' -n psmdb-operator ']' + apply_rbac cw-rbac + local operator_namespace=psmdb-operator + local rbac=cw-rbac + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1850/deploy/cw-rbac.yaml + sed -e 's^namespace: .*^namespace: psmdb-operator^' + kubectl_bin apply -n psmdb-operator -f - ++ mktemp + local LAST_OUT=/tmp/tmp.bm2mgd4xiJ ++ mktemp + local LAST_ERR=/tmp/tmp.S6NtHAfERJ + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -n psmdb-operator -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.bm2mgd4xiJ clusterrole.rbac.authorization.k8s.io/percona-server-mongodb-operator created serviceaccount/percona-server-mongodb-operator created clusterrolebinding.rbac.authorization.k8s.io/service-account-percona-server-mongodb-operator created + cat /tmp/tmp.S6NtHAfERJ + rm /tmp/tmp.bm2mgd4xiJ /tmp/tmp.S6NtHAfERJ + return 0 + yq eval ' (.spec.template.spec.containers[].image = "perconalab/percona-server-mongodb-operator:PR-1850-4eb33c10") | ((.. | select(.[] == "DISABLE_TELEMETRY")) |= .value="true") | ((.. 
| select(.[] == "LOG_LEVEL")) |= .value="DEBUG")' /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1850/deploy/cw-operator.yaml + kubectl_bin apply -n psmdb-operator -f - ++ mktemp + local LAST_OUT=/tmp/tmp.1fG476n3ar ++ mktemp + local LAST_ERR=/tmp/tmp.QmvrxGEEaF + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -n psmdb-operator -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.1fG476n3ar deployment.apps/percona-server-mongodb-operator created + cat /tmp/tmp.QmvrxGEEaF + rm /tmp/tmp.1fG476n3ar /tmp/tmp.QmvrxGEEaF + return 0 + sleep 20 ++ get_operator_pod ++ kubectl_bin get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.EGj8ZXyKUA +++ mktemp ++ local LAST_ERR=/tmp/tmp.ZSkGGeNE0G ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.EGj8ZXyKUA ++ cat /tmp/tmp.ZSkGGeNE0G ++ rm /tmp/tmp.EGj8ZXyKUA /tmp/tmp.ZSkGGeNE0G ++ return 0 + wait_operator_pod percona-server-mongodb-operator-67bdb49f8b-xpxgj + local pod=percona-server-mongodb-operator-67bdb49f8b-xpxgj + set +o xtrace waiting for pod/percona-server-mongodb-operator-67bdb49f8b-xpxgj to be ready.OK + echo 'Print operator info from log' Print operator info from log + grep 'Manager starting up' ++ get_operator_pod ++ kubectl_bin get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.EBMZevBbTx +++ mktemp ++ local LAST_ERR=/tmp/tmp.xaWCuXg21Y ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.EBMZevBbTx ++ cat /tmp/tmp.xaWCuXg21Y ++ rm /tmp/tmp.EBMZevBbTx /tmp/tmp.xaWCuXg21Y ++ return 0 + kubectl_bin logs -n psmdb-operator percona-server-mongodb-operator-67bdb49f8b-xpxgj ++ mktemp + local LAST_OUT=/tmp/tmp.ptquUpG1N4 ++ mktemp + local LAST_ERR=/tmp/tmp.4z81F9zeY7 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl logs -n psmdb-operator percona-server-mongodb-operator-67bdb49f8b-xpxgj + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.ptquUpG1N4 + cat /tmp/tmp.4z81F9zeY7 + rm /tmp/tmp.ptquUpG1N4 /tmp/tmp.4z81F9zeY7 + return 0 2025-11-10T07:11:05.635Z INFO setup Manager starting up {"gitCommit": "4eb33c104d0cbf3fd7123b6a71ad408498660a69", "gitBranch": "PR-1850-4eb33c10", "buildTime": "", "goVersion": "go1.25.4", "os": "linux", "arch": "amd64"} + create_namespace monitoring-pmm3-8293 + local namespace=monitoring-pmm3-8293 + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + destroy_chaos_mesh ++ helm list --all-namespaces --filter chaos-mesh ++ tail -n1 ++ awk '-F ' '{print $2}' ++ sed s/NAMESPACE// + local chaos_mesh_ns= + desc 'destroy chaos-mesh' + set +o xtrace ----------------------------------------------------------------------------------- destroy chaos-mesh ----------------------------------------------------------------------------------- + '[' -n '' ']' ++ kubectl get MutatingWebhookConfiguration ++ grep chaos-mesh ++ awk 
'{print $1}' + timeout 30 kubectl delete MutatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep validate-auth ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl api-resources ++ grep chaos-mesh ++ awk '{print $1}' ++ kubectl get crd ++ grep chaos-mesh.org ++ awk '{print $1}' + timeout 30 kubectl delete crd error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrolebinding ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrolebinding error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrole ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrole error: resource(s) were provided, but no name was specified + : + desc 'cleaned up all old namespaces' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up all old namespaces ----------------------------------------------------------------------------------- + kubectl_bin get ns + egrep -v '^kube-|^default|Terminating|psmdb-operator|openshift|^gke-|^gmp-|^NAME' + awk '{print$1}' + '[' -n '' ']' + xargs kubectl delete ns + desc 'cleaned up old namespaces monitoring-pmm3-8293' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up old namespaces monitoring-pmm3-8293 ----------------------------------------------------------------------------------- + kubectl_bin delete namespace monitoring-pmm3-8293 --ignore-not-found ++ mktemp ++ mktemp egrep: warning: egrep is obsolescent; using grep -E + local LAST_OUT=/tmp/tmp.Jcx8hNFBcG ++ mktemp + local LAST_OUT=/tmp/tmp.ddnMljJj9H + local LAST_ERR=/tmp/tmp.em9ERrg4BU ++ mktemp + local exit_status=0 + local timeout=4 ++ seq 0 2 + local LAST_ERR=/tmp/tmp.zBKIqbV40c + local exit_status=0 + local timeout=4 + for i in $(seq 0 2) + set +e + kubectl get ns ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete namespace monitoring-pmm3-8293 --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Jcx8hNFBcG + cat /tmp/tmp.em9ERrg4BU + rm /tmp/tmp.Jcx8hNFBcG /tmp/tmp.em9ERrg4BU + return 0 error: resource(s) were provided, but no name was specified + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.ddnMljJj9H + cat /tmp/tmp.zBKIqbV40c + rm /tmp/tmp.ddnMljJj9H /tmp/tmp.zBKIqbV40c + return 0 + kubectl_bin wait --for=delete namespace monitoring-pmm3-8293 ++ mktemp + local LAST_OUT=/tmp/tmp.qlnBCcTydW ++ mktemp + local LAST_ERR=/tmp/tmp.DsVWQGHMJP + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete namespace monitoring-pmm3-8293 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.qlnBCcTydW + cat /tmp/tmp.DsVWQGHMJP + rm /tmp/tmp.qlnBCcTydW /tmp/tmp.DsVWQGHMJP + return 0 + desc 'create namespace monitoring-pmm3-8293' + set +o xtrace ----------------------------------------------------------------------------------- create namespace monitoring-pmm3-8293 ----------------------------------------------------------------------------------- + kubectl_bin create namespace 
monitoring-pmm3-8293 ++ mktemp + local LAST_OUT=/tmp/tmp.X6vklB9jIx ++ mktemp + local LAST_ERR=/tmp/tmp.bISyANxZrN + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl create namespace monitoring-pmm3-8293 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.X6vklB9jIx namespace/monitoring-pmm3-8293 created + cat /tmp/tmp.bISyANxZrN + rm /tmp/tmp.X6vklB9jIx /tmp/tmp.bISyANxZrN + return 0 + set_kube_ctx monitoring-pmm3-8293 + local namespace=monitoring-pmm3-8293 ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.nl5RuwGb1w +++ mktemp ++ local LAST_ERR=/tmp/tmp.EbYiAWwia1 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.nl5RuwGb1w ++ cat /tmp/tmp.EbYiAWwia1 ++ rm /tmp/tmp.nl5RuwGb1w /tmp/tmp.EbYiAWwia1 ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-1850-4eb33c10-13-cluster1 --namespace=monitoring-pmm3-8293 ++ mktemp + local LAST_OUT=/tmp/tmp.ZeByY3eJiU ++ mktemp + local LAST_ERR=/tmp/tmp.OvA3G7vQkJ + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-1850-4eb33c10-13-cluster1 --namespace=monitoring-pmm3-8293 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.ZeByY3eJiU Context "gke_cloud-dev-112233_us-central1-a_jen-psmdb-1850-4eb33c10-13-cluster1" modified. + cat /tmp/tmp.OvA3G7vQkJ + rm /tmp/tmp.ZeByY3eJiU /tmp/tmp.OvA3G7vQkJ + return 0 + deploy_cert_manager + desc 'deploy cert manager' + set +o xtrace ----------------------------------------------------------------------------------- deploy cert manager ----------------------------------------------------------------------------------- + kubectl_bin create namespace cert-manager ++ mktemp + local LAST_OUT=/tmp/tmp.HEpeTqnvm3 ++ mktemp + local LAST_ERR=/tmp/tmp.aeUh0Hm6ms + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl create namespace cert-manager + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.HEpeTqnvm3 namespace/cert-manager created + cat /tmp/tmp.aeUh0Hm6ms + rm /tmp/tmp.HEpeTqnvm3 /tmp/tmp.aeUh0Hm6ms + return 0 + kubectl_bin label namespace cert-manager certmanager.k8s.io/disable-validation=true ++ mktemp + local LAST_OUT=/tmp/tmp.iid7fRUVZl ++ mktemp + local LAST_ERR=/tmp/tmp.TS6wyAOjot + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl label namespace cert-manager certmanager.k8s.io/disable-validation=true + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.iid7fRUVZl namespace/cert-manager labeled + cat /tmp/tmp.TS6wyAOjot + rm /tmp/tmp.iid7fRUVZl /tmp/tmp.TS6wyAOjot + return 0 + kubectl_bin apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml --validate=false ++ mktemp + local LAST_OUT=/tmp/tmp.PnnsCKSYh8 ++ mktemp + local LAST_ERR=/tmp/tmp.C6SxMD3HJs + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml --validate=false + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.PnnsCKSYh8 namespace/cert-manager configured customresourcedefinition.apiextensions.k8s.io/challenges.acme.cert-manager.io 
unchanged customresourcedefinition.apiextensions.k8s.io/orders.acme.cert-manager.io unchanged customresourcedefinition.apiextensions.k8s.io/certificaterequests.cert-manager.io unchanged customresourcedefinition.apiextensions.k8s.io/certificates.cert-manager.io unchanged customresourcedefinition.apiextensions.k8s.io/clusterissuers.cert-manager.io unchanged customresourcedefinition.apiextensions.k8s.io/issuers.cert-manager.io unchanged serviceaccount/cert-manager-cainjector created serviceaccount/cert-manager created serviceaccount/cert-manager-webhook created clusterrole.rbac.authorization.k8s.io/cert-manager-cainjector unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-issuers unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-clusterissuers unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-certificates unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-orders unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-challenges unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-ingress-shim unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-cluster-view unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-view unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-edit unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-approve:cert-manager-io unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-certificatesigningrequests unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-webhook:subjectaccessreviews unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-cainjector unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-issuers unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-clusterissuers unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-certificates unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-orders unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-challenges unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-ingress-shim unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-approve:cert-manager-io unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-certificatesigningrequests unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-webhook:subjectaccessreviews unchanged role.rbac.authorization.k8s.io/cert-manager-cainjector:leaderelection unchanged role.rbac.authorization.k8s.io/cert-manager:leaderelection unchanged role.rbac.authorization.k8s.io/cert-manager-tokenrequest created role.rbac.authorization.k8s.io/cert-manager-webhook:dynamic-serving created rolebinding.rbac.authorization.k8s.io/cert-manager-cainjector:leaderelection unchanged rolebinding.rbac.authorization.k8s.io/cert-manager:leaderelection unchanged rolebinding.rbac.authorization.k8s.io/cert-manager-tokenrequest created rolebinding.rbac.authorization.k8s.io/cert-manager-webhook:dynamic-serving created service/cert-manager-cainjector created service/cert-manager created service/cert-manager-webhook created deployment.apps/cert-manager-cainjector created deployment.apps/cert-manager created deployment.apps/cert-manager-webhook created mutatingwebhookconfiguration.admissionregistration.k8s.io/cert-manager-webhook configured 
validatingwebhookconfiguration.admissionregistration.k8s.io/cert-manager-webhook configured + cat /tmp/tmp.C6SxMD3HJs Warning: resource namespaces/cert-manager is missing the kubectl.kubernetes.io/last-applied-configuration annotation which is required by kubectl apply. kubectl apply should only be used on resources created declaratively by either kubectl create --save-config or kubectl apply. The missing annotation will be patched automatically. + rm /tmp/tmp.PnnsCKSYh8 /tmp/tmp.C6SxMD3HJs + return 0 + kubectl_bin -n cert-manager wait pod -l app.kubernetes.io/instance=cert-manager --for=condition=ready ++ mktemp + local LAST_OUT=/tmp/tmp.OEaH82K7MF ++ mktemp + local LAST_ERR=/tmp/tmp.TXdMgvK9a1 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl -n cert-manager wait pod -l app.kubernetes.io/instance=cert-manager --for=condition=ready + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.OEaH82K7MF pod/cert-manager-cainjector-5dc9c8b4f7-x2dgm condition met pod/cert-manager-df4b69479-khdpb condition met pod/cert-manager-webhook-769bbb594d-68kkl condition met + cat /tmp/tmp.TXdMgvK9a1 + rm /tmp/tmp.OEaH82K7MF /tmp/tmp.TXdMgvK9a1 + return 0 + sleep 120 + desc 'install PMM Server' + set +o xtrace ----------------------------------------------------------------------------------- install PMM Server ----------------------------------------------------------------------------------- + deploy_pmm3_server + helm uninstall monitoring Error: uninstall: Release not loaded: monitoring: release: not found + : + helm repo remove percona "percona" has been removed from your repositories + kubectl delete clusterrole monitoring --ignore-not-found + kubectl delete clusterrolebinding monitoring --ignore-not-found + helm repo add percona https://percona.github.io/percona-helm-charts/ "percona" has been added to your repositories + helm repo update Hang tight while we grab the latest from your chart repositories... ...Successfully got an update from the "minio" chart repository ...Successfully got an update from the "percona" chart repository ...Successfully got an update from the "hashicorp" chart repository ...Successfully got an update from the "stable" chart repository Update Complete. ⎈Happy Helming!⎈ + [[ -n '' ]] + retry 10 60 helm install monitoring percona/pmm --set fullnameOverride=monitoring-server --set image.tag=3-dev-latest --set image.repository=perconalab/pmm-server --set service.type=LoadBalancer --force + local max=10 + local delay=60 + shift 2 + local n=1 + helm install monitoring percona/pmm --set fullnameOverride=monitoring-server --set image.tag=3-dev-latest --set image.repository=perconalab/pmm-server --set service.type=LoadBalancer --force NAME: monitoring LAST DEPLOYED: Mon Nov 10 07:14:08 2025 NAMESPACE: monitoring-pmm3-8293 STATUS: deployed REVISION: 1 TEST SUITE: None NOTES: Percona Monitoring and Management (PMM) An open source database monitoring, observability and management tool Check more info here: https://docs.percona.com/percona-monitoring-and-management/index.html Get the application URL: NOTE: It may take a few minutes for the LoadBalancer IP to be available. 
You can watch the status of by running 'kubectl get --namespace monitoring-pmm3-8293 svc -w monitoring-service' export SERVICE_IP=$(kubectl get svc --namespace monitoring-pmm3-8293 monitoring-service -o jsonpath="{.status.loadBalancer.ingress[0].ip}") echo https://$SERVICE_IP: Get password for the "admin" user: export ADMIN_PASS=$(kubectl get secret pmm-secret --namespace monitoring-pmm3-8293 -o jsonpath='{.data.PMM_ADMIN_PASSWORD}' | base64 --decode) echo $ADMIN_PASS + sleep 20 + kubectl_bin exec monitoring-server-0 -- bash -c 'ls -l /proc/*/exe 2>/dev/null| grep postgres >/dev/null' ++ mktemp + local LAST_OUT=/tmp/tmp.UhKVcm1haP ++ mktemp + local LAST_ERR=/tmp/tmp.bXMK8fe6Yv + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec monitoring-server-0 -- bash -c 'ls -l /proc/*/exe 2>/dev/null| grep postgres >/dev/null' + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.UhKVcm1haP + cat /tmp/tmp.bXMK8fe6Yv error: Internal error occurred: unable to upgrade connection: container not found ("pmm") + sleep 0 + for i in $(seq 0 2) + set +e + kubectl exec monitoring-server-0 -- bash -c 'ls -l /proc/*/exe 2>/dev/null| grep postgres >/dev/null' + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.UhKVcm1haP + cat /tmp/tmp.bXMK8fe6Yv error: Internal error occurred: unable to upgrade connection: container not found ("pmm") + sleep 4 + for i in $(seq 0 2) + set +e + kubectl exec monitoring-server-0 -- bash -c 'ls -l /proc/*/exe 2>/dev/null| grep postgres >/dev/null' + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.UhKVcm1haP + cat /tmp/tmp.bXMK8fe6Yv error: Internal error occurred: unable to upgrade connection: container not found ("pmm") + sleep 8 + cat /tmp/tmp.UhKVcm1haP + cat /tmp/tmp.bXMK8fe6Yv error: Internal error occurred: unable to upgrade connection: container not found ("pmm") + rm /tmp/tmp.UhKVcm1haP /tmp/tmp.bXMK8fe6Yv + return 1 + echo 'Retry 0' Retry 0 + sleep 5 + let retry+=1 + '[' 1 -ge 20 ']' + kubectl_bin exec monitoring-server-0 -- bash -c 'ls -l /proc/*/exe 2>/dev/null| grep postgres >/dev/null' ++ mktemp + local LAST_OUT=/tmp/tmp.SDEjL2IbBb ++ mktemp + local LAST_ERR=/tmp/tmp.Xcsnk8o3ct + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec monitoring-server-0 -- bash -c 'ls -l /proc/*/exe 2>/dev/null| grep postgres >/dev/null' + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.SDEjL2IbBb + cat /tmp/tmp.Xcsnk8o3ct error: Internal error occurred: unable to upgrade connection: container not found ("pmm") + sleep 0 + for i in $(seq 0 2) + set +e + kubectl exec monitoring-server-0 -- bash -c 'ls -l /proc/*/exe 2>/dev/null| grep postgres >/dev/null' + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.SDEjL2IbBb + cat /tmp/tmp.Xcsnk8o3ct error: Internal error occurred: unable to upgrade connection: container not found ("pmm") + sleep 4 + for i in $(seq 0 2) + set +e + kubectl exec monitoring-server-0 -- bash -c 'ls -l /proc/*/exe 2>/dev/null| grep postgres >/dev/null' + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.SDEjL2IbBb + cat /tmp/tmp.Xcsnk8o3ct command terminated with exit code 1 + sleep 8 + cat /tmp/tmp.SDEjL2IbBb + cat /tmp/tmp.Xcsnk8o3ct command terminated with exit code 1 + rm /tmp/tmp.SDEjL2IbBb /tmp/tmp.Xcsnk8o3ct + return 1 + echo 'Retry 1' Retry 1 + sleep 5 + let retry+=1 + '[' 2 -ge 20 ']' + kubectl_bin exec monitoring-server-0 -- bash -c 'ls -l /proc/*/exe 2>/dev/null| grep postgres 
>/dev/null' ++ mktemp + local LAST_OUT=/tmp/tmp.L2Yt4NXv7X ++ mktemp + local LAST_ERR=/tmp/tmp.zmcfe496k1 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec monitoring-server-0 -- bash -c 'ls -l /proc/*/exe 2>/dev/null| grep postgres >/dev/null' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.L2Yt4NXv7X + cat /tmp/tmp.zmcfe496k1 + rm /tmp/tmp.L2Yt4NXv7X /tmp/tmp.zmcfe496k1 + return 0 + cluster=monitoring-pmm3 + desc 'create secrets and start client' + set +o xtrace ----------------------------------------------------------------------------------- create secrets and start client ----------------------------------------------------------------------------------- + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1850/e2e-tests/conf/secrets.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1850/e2e-tests/monitoring-pmm3/conf/secrets.yml ++ mktemp + local LAST_OUT=/tmp/tmp.yXOarC8iKS ++ mktemp + local LAST_ERR=/tmp/tmp.SGkGHF10yl + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1850/e2e-tests/conf/secrets.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1850/e2e-tests/monitoring-pmm3/conf/secrets.yml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.yXOarC8iKS secret/some-users created secret/some-users unchanged + cat /tmp/tmp.SGkGHF10yl + rm /tmp/tmp.yXOarC8iKS /tmp/tmp.SGkGHF10yl + return 0 + kubectl_bin apply -f - + yq '.spec.template.spec.volumes[0].secret.secretName="monitoring-pmm3-ssl"' /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1850/e2e-tests/conf/client_with_tls.yml ++ mktemp + local LAST_OUT=/tmp/tmp.9glF6JDKbe ++ mktemp + local LAST_ERR=/tmp/tmp.lG2XKjIGXb + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.9glF6JDKbe deployment.apps/psmdb-client created + cat /tmp/tmp.lG2XKjIGXb + rm /tmp/tmp.9glF6JDKbe /tmp/tmp.lG2XKjIGXb + return 0 + sleep 90 + desc 'create first PSMDB cluster monitoring-pmm3' + set +o xtrace ----------------------------------------------------------------------------------- create first PSMDB cluster monitoring-pmm3 ----------------------------------------------------------------------------------- + custom_cluster_name=super-custom + yq eval '(.spec | select(.image == null)).image = "perconalab/percona-server-mongodb-operator:main-mongod8.0"' /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1850/e2e-tests/monitoring-pmm3/conf/monitoring-pmm3-rs0.yml + yq eval '(.spec | select(has("pmm"))).pmm.image = "perconalab/pmm-client:3-dev-latest"' - + yq eval '(.spec | select(has("pmm"))).pmm.customClusterName = "super-custom"' - + yq eval '(.spec | select(has("initImage"))).initImage = "perconalab/percona-server-mongodb-operator:PR-1850-4eb33c10"' - + yq eval '(.spec | select(has("backup"))).backup.image = "perconalab/percona-server-mongodb-operator:main-backup"' - + yq eval '.spec.upgradeOptions.apply = "Never"' - + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.u0tXFhFsxh ++ mktemp + local LAST_ERR=/tmp/tmp.L72lYgmD9v + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.u0tXFhFsxh perconaservermongodb.psmdb.percona.com/monitoring-pmm3 created + cat /tmp/tmp.L72lYgmD9v + rm 
/tmp/tmp.u0tXFhFsxh /tmp/tmp.L72lYgmD9v + return 0 + wait_for_running monitoring-pmm3-rs0 3 + local name=monitoring-pmm3-rs0 + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=monitoring-pmm3 ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod monitoring-pmm3-rs0-0 + local pod=monitoring-pmm3-rs0-0 + set +o xtrace waiting for pod/monitoring-pmm3-rs0-0 to be ready...............OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod monitoring-pmm3-rs0-1 + local pod=monitoring-pmm3-rs0-1 + set +o xtrace waiting for pod/monitoring-pmm3-rs0-1 to be ready............OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb monitoring-pmm3 -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.zAZXMnvw4u +++ mktemp ++ local LAST_ERR=/tmp/tmp.9pIzozSIdq ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb monitoring-pmm3 -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.zAZXMnvw4u ++ cat /tmp/tmp.9pIzozSIdq ++ rm /tmp/tmp.zAZXMnvw4u /tmp/tmp.9pIzozSIdq ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod monitoring-pmm3-rs0-2 + local pod=monitoring-pmm3-rs0-2 + set +o xtrace waiting for pod/monitoring-pmm3-rs0-2 to be ready.............OK ++ kubectl_bin get psmdb monitoring-pmm3 -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Ba2Ztz2DvO +++ mktemp ++ local LAST_ERR=/tmp/tmp.FoKHpoI5Vp ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb monitoring-pmm3 -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Ba2Ztz2DvO ++ cat /tmp/tmp.FoKHpoI5Vp ++ rm /tmp/tmp.Ba2Ztz2DvO /tmp/tmp.FoKHpoI5Vp ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb monitoring-pmm3 -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.pSFTqIWNQV +++ mktemp ++ local LAST_ERR=/tmp/tmp.Eob2k28FUj ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb monitoring-pmm3 -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.pSFTqIWNQV ++ cat /tmp/tmp.Eob2k28FUj ++ rm /tmp/tmp.pSFTqIWNQV /tmp/tmp.Eob2k28FUj ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness........ 
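The readiness gate that just completed (wait_for_running calling wait_pod per replica-set member, printing one dot per poll and OK once the pod is Ready) boils down to a simple polling loop. A sketch of that loop, reconstructed from the trace; the exact readiness check and the retry cap used by the real helper are assumptions here:

wait_pod() {
    # Poll the pod's Ready condition once per second, echoing a dot per attempt,
    # matching the "waiting for pod/... to be ready.......OK" lines above.
    local pod=$1 tries=0 max_tries=360   # max_tries is an assumed cap, not taken from the log
    echo -n "waiting for pod/$pod to be ready"
    until [ "$(kubectl get pod "$pod" \
            -o 'jsonpath={.status.conditions[?(@.type=="Ready")].status}' 2>/dev/null)" = "True" ]; do
        echo -n .
        tries=$((tries + 1))
        if [ "$tries" -ge "$max_tries" ]; then
            echo " timeout: pod/$pod did not become ready"
            return 1
        fi
        sleep 1
    done
    echo OK
}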
+ desc 'check if pmm-client container is not enabled' + set +o xtrace ----------------------------------------------------------------------------------- check if pmm-client container is not enabled ----------------------------------------------------------------------------------- + compare_kubectl statefulset/monitoring-pmm3-rs0 -no-pmm + local resource=statefulset/monitoring-pmm3-rs0 + local postfix=-no-pmm + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1850/e2e-tests/monitoring-pmm3/compare/statefulset_monitoring-pmm3-rs0-no-pmm.yml + local new_result=/tmp/tmp.68JekPG774/statefulset_monitoring-pmm3-rs0.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1850/e2e-tests/monitoring-pmm3/compare/statefulset_monitoring-pmm3-rs0-no-pmm-oc.yml ']' + kubectl_bin get -o yaml statefulset/monitoring-pmm3-rs0 + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("monitoring-pmm3-8293", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. 
== "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.EK2cQIDm90 ++ mktemp + local LAST_ERR=/tmp/tmp.MONDvNBMJ9 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get -o yaml statefulset/monitoring-pmm3-rs0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.EK2cQIDm90 + cat /tmp/tmp.MONDvNBMJ9 + rm /tmp/tmp.EK2cQIDm90 /tmp/tmp.MONDvNBMJ9 + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.68JekPG774/statefulset_monitoring-pmm3-rs0.yml + version_gt 1.22 ++ echo '1.31 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.68JekPG774/statefulset_monitoring-pmm3-rs0.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.68JekPG774/statefulset_monitoring-pmm3-rs0.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1850/e2e-tests/monitoring-pmm3/compare/statefulset_monitoring-pmm3-rs0-no-pmm.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1850/e2e-tests/monitoring-pmm3/compare/statefulset_monitoring-pmm3-rs0-no-pmm.yml /tmp/tmp.68JekPG774/statefulset_monitoring-pmm3-rs0.yml + log 'compare_kubectl: statefulset/monitoring-pmm3-rs0 OK' + set +o xtrace [2025-11-10T07:18:45+0000] compare_kubectl: statefulset/monitoring-pmm3-rs0 OK + sleep 10 + custom_port=27019 + run_mongos 'db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})' userAdmin:userAdmin123456@monitoring-pmm3-mongos.monitoring-pmm3-8293 '' '' '--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' 27019 + local 'command=db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})' + local uri=userAdmin:userAdmin123456@monitoring-pmm3-mongos.monitoring-pmm3-8293 + local driver=mongodb + local suffix=.svc.cluster.local + local 'mongo_flag=--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' + local port=27019 + local mongo_bin=mongo ++ echo .svc.cluster.local ++ awk -F: '{print $2}' + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27019 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.sFWe7tug8I +++ mktemp ++ local LAST_ERR=/tmp/tmp.9hy3SwUEpD ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.sFWe7tug8I ++ cat /tmp/tmp.9hy3SwUEpD ++ rm /tmp/tmp.sFWe7tug8I /tmp/tmp.9hy3SwUEpD ++ return 0 + local client_container=psmdb-client-7568665d56-5z8z9 + kubectl_bin exec psmdb-client-7568665d56-5z8z9 -- bash -c 'printf '\''db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})\n'\'' | mongo mongodb://userAdmin:userAdmin123456@monitoring-pmm3-mongos.monitoring-pmm3-8293.svc.cluster.local:27019/admin --tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' ++ mktemp + local LAST_OUT=/tmp/tmp.RdS45FMnsU ++ mktemp + local LAST_ERR=/tmp/tmp.4iJaFxNxnD + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-7568665d56-5z8z9 -- bash -c 'printf '\''db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})\n'\'' | mongo 
mongodb://userAdmin:userAdmin123456@monitoring-pmm3-mongos.monitoring-pmm3-8293.svc.cluster.local:27019/admin --tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.RdS45FMnsU Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://monitoring-pmm3-mongos.monitoring-pmm3-8293.svc.cluster.local:27019/admin?compressors=disabled&gssapiServiceName=mongodb {"t":{"$date":"2025-11-10T07:18:58.096Z"},"s":"I", "c":"NETWORK", "id":5490002, "ctx":"thread1","msg":"Started a new thread for the timer service"} Implicit session: session { "id" : UUID("dd1c4f70-ba2d-4638-bd87-4a05c9bba4be") } Percona Server for MongoDB server version: v8.0.12-4 WARNING: shell and server versions do not match Successfully added user: { "user" : "myApp", "roles" : [ { "db" : "myApp", "role" : "readWrite" } ] } bye + cat /tmp/tmp.4iJaFxNxnD + rm /tmp/tmp.RdS45FMnsU /tmp/tmp.4iJaFxNxnD + return 0 + run_mongos 'sh.enableSharding("myApp")' clusterAdmin:clusterAdmin123456@monitoring-pmm3-mongos.monitoring-pmm3-8293 '' '' '--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' 27019 + local 'command=sh.enableSharding("myApp")' + local uri=clusterAdmin:clusterAdmin123456@monitoring-pmm3-mongos.monitoring-pmm3-8293 + local driver=mongodb + local suffix=.svc.cluster.local + local 'mongo_flag=--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' + local port=27019 + local mongo_bin=mongo ++ echo .svc.cluster.local ++ awk -F: '{print $2}' + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27019 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.QVh6qKjVii +++ mktemp ++ local LAST_ERR=/tmp/tmp.A0ChkahMXc ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.QVh6qKjVii ++ cat /tmp/tmp.A0ChkahMXc ++ rm /tmp/tmp.QVh6qKjVii /tmp/tmp.A0ChkahMXc ++ return 0 + local client_container=psmdb-client-7568665d56-5z8z9 + kubectl_bin exec psmdb-client-7568665d56-5z8z9 -- bash -c 'printf '\''sh.enableSharding("myApp")\n'\'' | mongo mongodb://clusterAdmin:clusterAdmin123456@monitoring-pmm3-mongos.monitoring-pmm3-8293.svc.cluster.local:27019/admin --tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' ++ mktemp + local LAST_OUT=/tmp/tmp.I2AJTBqe8c ++ mktemp + local LAST_ERR=/tmp/tmp.163CnhZjBR + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-7568665d56-5z8z9 -- bash -c 'printf '\''sh.enableSharding("myApp")\n'\'' | mongo mongodb://clusterAdmin:clusterAdmin123456@monitoring-pmm3-mongos.monitoring-pmm3-8293.svc.cluster.local:27019/admin --tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.I2AJTBqe8c Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://monitoring-pmm3-mongos.monitoring-pmm3-8293.svc.cluster.local:27019/admin?compressors=disabled&gssapiServiceName=mongodb {"t":{"$date":"2025-11-10T07:19:00.142Z"},"s":"I", "c":"NETWORK", "id":5490002, "ctx":"thread1","msg":"Started a new thread for the timer service"} Implicit session: session { "id" : UUID("77f30687-aacf-4f2a-88eb-cba810436ae6") } 
Percona Server for MongoDB server version: v8.0.12-4 WARNING: shell and server versions do not match { "ok" : 1, "$clusterTime" : { "clusterTime" : Timestamp(1762759140, 8), "signature" : { "hash" : BinData(0,"4dO4UrRDdLHLi0ClHifGgL+A+qI="), "keyId" : NumberLong("7570992599327047704") } }, "operationTime" : Timestamp(1762759140, 5) } bye + cat /tmp/tmp.163CnhZjBR + rm /tmp/tmp.I2AJTBqe8c /tmp/tmp.163CnhZjBR + return 0 + insert_data_mongos 100500 myApp '--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' 27019 + local data=100500 + local db_name=myApp + local 'flags=--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' + local port=27019 + run_mongos 'use myApp\n db.test.insert({ x: 100500 })' myApp:myPass@monitoring-pmm3-mongos.monitoring-pmm3-8293 '' '' '--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' 27019 + local 'command=use myApp\n db.test.insert({ x: 100500 })' + local uri=myApp:myPass@monitoring-pmm3-mongos.monitoring-pmm3-8293 + local driver=mongodb + local suffix=.svc.cluster.local + local 'mongo_flag=--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' + local port=27019 + local mongo_bin=mongo ++ echo .svc.cluster.local ++ awk -F: '{print $2}' + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27019 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.5hVIBayl03 +++ mktemp ++ local LAST_ERR=/tmp/tmp.VObSVlLpah ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.5hVIBayl03 ++ cat /tmp/tmp.VObSVlLpah ++ rm /tmp/tmp.5hVIBayl03 /tmp/tmp.VObSVlLpah ++ return 0 + local client_container=psmdb-client-7568665d56-5z8z9 + kubectl_bin exec psmdb-client-7568665d56-5z8z9 -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb://myApp:myPass@monitoring-pmm3-mongos.monitoring-pmm3-8293.svc.cluster.local:27019/admin --tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' ++ mktemp + local LAST_OUT=/tmp/tmp.DXSH3AwFUJ ++ mktemp + local LAST_ERR=/tmp/tmp.roO9YTDOIM + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-7568665d56-5z8z9 -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb://myApp:myPass@monitoring-pmm3-mongos.monitoring-pmm3-8293.svc.cluster.local:27019/admin --tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.DXSH3AwFUJ Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://monitoring-pmm3-mongos.monitoring-pmm3-8293.svc.cluster.local:27019/admin?compressors=disabled&gssapiServiceName=mongodb {"t":{"$date":"2025-11-10T07:19:02.239Z"},"s":"I", "c":"NETWORK", "id":5490002, "ctx":"thread1","msg":"Started a new thread for the timer service"} Implicit session: session { "id" : UUID("7f5927f2-a7c4-4119-842e-c26817fe6d45") } Percona Server for MongoDB server version: v8.0.12-4 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.roO9YTDOIM + rm /tmp/tmp.DXSH3AwFUJ /tmp/tmp.roO9YTDOIM + return 0 + insert_data_mongos 100600 myApp '--tlsCertificateKeyFile /tmp/tls.pem 
--tlsCAFile /etc/mongodb-ssl/ca.crt --tls' 27019 + local data=100600 + local db_name=myApp + local 'flags=--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' + local port=27019 + run_mongos 'use myApp\n db.test.insert({ x: 100600 })' myApp:myPass@monitoring-pmm3-mongos.monitoring-pmm3-8293 '' '' '--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' 27019 + local 'command=use myApp\n db.test.insert({ x: 100600 })' + local uri=myApp:myPass@monitoring-pmm3-mongos.monitoring-pmm3-8293 + local driver=mongodb + local suffix=.svc.cluster.local + local 'mongo_flag=--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' + local port=27019 + local mongo_bin=mongo ++ echo .svc.cluster.local ++ awk -F: '{print $2}' + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27019 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.QM6cZWSLic +++ mktemp ++ local LAST_ERR=/tmp/tmp.mnZOHV3xbO ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.QM6cZWSLic ++ cat /tmp/tmp.mnZOHV3xbO ++ rm /tmp/tmp.QM6cZWSLic /tmp/tmp.mnZOHV3xbO ++ return 0 + local client_container=psmdb-client-7568665d56-5z8z9 + kubectl_bin exec psmdb-client-7568665d56-5z8z9 -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100600 })\n'\'' | mongo mongodb://myApp:myPass@monitoring-pmm3-mongos.monitoring-pmm3-8293.svc.cluster.local:27019/admin --tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' ++ mktemp + local LAST_OUT=/tmp/tmp.qS3wXKPRAy ++ mktemp + local LAST_ERR=/tmp/tmp.jvsWzIwTlv + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-7568665d56-5z8z9 -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100600 })\n'\'' | mongo mongodb://myApp:myPass@monitoring-pmm3-mongos.monitoring-pmm3-8293.svc.cluster.local:27019/admin --tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.qS3wXKPRAy Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://monitoring-pmm3-mongos.monitoring-pmm3-8293.svc.cluster.local:27019/admin?compressors=disabled&gssapiServiceName=mongodb {"t":{"$date":"2025-11-10T07:19:04.431Z"},"s":"I", "c":"NETWORK", "id":5490002, "ctx":"thread1","msg":"Started a new thread for the timer service"} Implicit session: session { "id" : UUID("459117ee-e54c-458d-8bc1-11cc295af938") } Percona Server for MongoDB server version: v8.0.12-4 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.jvsWzIwTlv + rm /tmp/tmp.qS3wXKPRAy /tmp/tmp.jvsWzIwTlv + return 0 + insert_data_mongos 100700 myApp '--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' 27019 + local data=100700 + local db_name=myApp + local 'flags=--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' + local port=27019 + run_mongos 'use myApp\n db.test.insert({ x: 100700 })' myApp:myPass@monitoring-pmm3-mongos.monitoring-pmm3-8293 '' '' '--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' 27019 + local 'command=use myApp\n db.test.insert({ x: 100700 })' + local 
uri=myApp:myPass@monitoring-pmm3-mongos.monitoring-pmm3-8293 + local driver=mongodb + local suffix=.svc.cluster.local + local 'mongo_flag=--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' + local port=27019 + local mongo_bin=mongo ++ echo .svc.cluster.local ++ awk -F: '{print $2}' + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27019 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.yguOGG8N4b +++ mktemp ++ local LAST_ERR=/tmp/tmp.AFEU3OLoKr ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.yguOGG8N4b ++ cat /tmp/tmp.AFEU3OLoKr ++ rm /tmp/tmp.yguOGG8N4b /tmp/tmp.AFEU3OLoKr ++ return 0 + local client_container=psmdb-client-7568665d56-5z8z9 + kubectl_bin exec psmdb-client-7568665d56-5z8z9 -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100700 })\n'\'' | mongo mongodb://myApp:myPass@monitoring-pmm3-mongos.monitoring-pmm3-8293.svc.cluster.local:27019/admin --tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' ++ mktemp + local LAST_OUT=/tmp/tmp.08JpWRB8H6 ++ mktemp + local LAST_ERR=/tmp/tmp.uBcfaMI05D + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-7568665d56-5z8z9 -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100700 })\n'\'' | mongo mongodb://myApp:myPass@monitoring-pmm3-mongos.monitoring-pmm3-8293.svc.cluster.local:27019/admin --tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.08JpWRB8H6 Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://monitoring-pmm3-mongos.monitoring-pmm3-8293.svc.cluster.local:27019/admin?compressors=disabled&gssapiServiceName=mongodb {"t":{"$date":"2025-11-10T07:19:06.587Z"},"s":"I", "c":"NETWORK", "id":5490002, "ctx":"thread1","msg":"Started a new thread for the timer service"} Implicit session: session { "id" : UUID("9de4851f-380d-4bed-b952-09ce3723e060") } Percona Server for MongoDB server version: v8.0.12-4 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.uBcfaMI05D + rm /tmp/tmp.08JpWRB8H6 /tmp/tmp.uBcfaMI05D + return 0 + desc 'add PMM3 token to secret' + set +o xtrace ----------------------------------------------------------------------------------- add PMM3 token to secret ----------------------------------------------------------------------------------- ++ get_pmm_server_token operator ++ local key_name=operator ++ [[ -z operator ]] ++ local ADMIN_PASSWORD +++ kubectl get secret pmm-secret -o 'jsonpath={.data.PMM_ADMIN_PASSWORD}' +++ base64 --decode ++ ADMIN_PASSWORD='j+uB(qx7<7^ s+0@' ++ [[ -z j+uB(qx7<7^ s+0@ ]] ++ local create_response create_status_code create_json_response ++++ get_service_endpoint monitoring-service ++++ local service=monitoring-service +++++ kubectl_bin get service/monitoring-service -o json +++++ jq '.status.loadBalancer.ingress[].hostname' +++++ sed -e 's/^"//; s/"$//;' ++++++ mktemp +++++ local LAST_OUT=/tmp/tmp.qUR3cSy9rm ++++++ mktemp +++++ local LAST_ERR=/tmp/tmp.SAa3wXywAl +++++ local exit_status=0 +++++ local timeout=4 ++++++ seq 0 2 +++++ for i in $(seq 0 2) +++++ set +e +++++ kubectl get 
service/monitoring-service -o json +++++ exit_status=0 +++++ set -e +++++ '[' 0 '!=' 0 -a -n 1 ']' +++++ break +++++ cat /tmp/tmp.qUR3cSy9rm +++++ cat /tmp/tmp.SAa3wXywAl +++++ rm /tmp/tmp.qUR3cSy9rm /tmp/tmp.SAa3wXywAl +++++ return 0 ++++ local hostname=null ++++ '[' -n null -a null '!=' null ']' +++++ kubectl_bin get service/monitoring-service -o json +++++ jq '.status.loadBalancer.ingress[].ip' +++++ sed -e 's/^"//; s/"$//;' ++++++ mktemp +++++ local LAST_OUT=/tmp/tmp.t9vT87naIg ++++++ mktemp +++++ local LAST_ERR=/tmp/tmp.PMQzvhx9Pp +++++ local exit_status=0 +++++ local timeout=4 ++++++ seq 0 2 +++++ for i in $(seq 0 2) +++++ set +e +++++ kubectl get service/monitoring-service -o json +++++ exit_status=0 +++++ set -e +++++ '[' 0 '!=' 0 -a -n 1 ']' +++++ break +++++ cat /tmp/tmp.t9vT87naIg +++++ cat /tmp/tmp.PMQzvhx9Pp +++++ rm /tmp/tmp.t9vT87naIg /tmp/tmp.PMQzvhx9Pp +++++ return 0 ++++ local ip=35.226.246.51 ++++ '[' -n 35.226.246.51 -a 35.226.246.51 '!=' null ']' ++++ echo 35.226.246.51 ++++ return +++ curl --insecure -s -X POST -H 'Content-Type: application/json' -H 'Accept: application/json' -d '{"name":"operator", "role":"Admin", "isDisabled":false}' --user 'admin:j+uB(qx7<7^ s+0@' https://35.226.246.51/graph/api/serviceaccounts -w '\n%{http_code}' ++ create_response='{"id":2,"uid":"df3ohxkpdoef4c","name":"operator","login":"sa-1-operator","orgId":1,"isDisabled":false,"role":"Admin","tokens":0,"avatarUrl":""} 201' +++ echo '{"id":2,"uid":"df3ohxkpdoef4c","name":"operator","login":"sa-1-operator","orgId":1,"isDisabled":false,"role":"Admin","tokens":0,"avatarUrl":""} 201' +++ tail -n1 ++ create_status_code=201 +++ echo '{"id":2,"uid":"df3ohxkpdoef4c","name":"operator","login":"sa-1-operator","orgId":1,"isDisabled":false,"role":"Admin","tokens":0,"avatarUrl":""} 201' +++ sed '$ d' ++ create_json_response='{"id":2,"uid":"df3ohxkpdoef4c","name":"operator","login":"sa-1-operator","orgId":1,"isDisabled":false,"role":"Admin","tokens":0,"avatarUrl":""}' ++ [[ 201 -ne 201 ]] ++ local service_account_id +++ echo '{"id":2,"uid":"df3ohxkpdoef4c","name":"operator","login":"sa-1-operator","orgId":1,"isDisabled":false,"role":"Admin","tokens":0,"avatarUrl":""}' +++ jq -r .id ++ service_account_id=2 ++ [[ -z 2 ]] ++ [[ 2 == \n\u\l\l ]] ++ local token_response token_status_code token_json_response ++++ get_service_endpoint monitoring-service ++++ local service=monitoring-service +++++ kubectl_bin get service/monitoring-service -o json +++++ jq '.status.loadBalancer.ingress[].hostname' ++++++ mktemp +++++ sed -e 's/^"//; s/"$//;' +++++ local LAST_OUT=/tmp/tmp.5MBQfyyhI7 ++++++ mktemp +++++ local LAST_ERR=/tmp/tmp.zNFF6p24lG +++++ local exit_status=0 +++++ local timeout=4 ++++++ seq 0 2 +++++ for i in $(seq 0 2) +++++ set +e +++++ kubectl get service/monitoring-service -o json +++++ exit_status=0 +++++ set -e +++++ '[' 0 '!=' 0 -a -n 1 ']' +++++ break +++++ cat /tmp/tmp.5MBQfyyhI7 +++++ cat /tmp/tmp.zNFF6p24lG +++++ rm /tmp/tmp.5MBQfyyhI7 /tmp/tmp.zNFF6p24lG +++++ return 0 ++++ local hostname=null ++++ '[' -n null -a null '!=' null ']' +++++ kubectl_bin get service/monitoring-service -o json +++++ jq '.status.loadBalancer.ingress[].ip' +++++ sed -e 's/^"//; s/"$//;' ++++++ mktemp +++++ local LAST_OUT=/tmp/tmp.rVqE4S3U1j ++++++ mktemp +++++ local LAST_ERR=/tmp/tmp.MpII0tpN3H +++++ local exit_status=0 +++++ local timeout=4 ++++++ seq 0 2 +++++ for i in $(seq 0 2) +++++ set +e +++++ kubectl get service/monitoring-service -o json +++++ exit_status=0 +++++ set -e +++++ '[' 0 '!=' 0 -a -n 1 ']' +++++ break 
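# --- editor's note (not part of the captured trace) ---------------------------------------------
# The deeply nested trace around this point is get_pmm_server_token: it resolves the
# monitoring-service LoadBalancer endpoint (hostname first, then falling back to the ingress IP)
# and then provisions a Grafana service-account token via the PMM API, which is stored in the
# some-users secret as PMM_SERVER_TOKEN. A minimal, hedged sketch of the equivalent steps, using
# only the endpoints and secret keys visible in this log (not verified against the e2e helpers):
ENDPOINT=$(kubectl get service/monitoring-service -o json | jq -r '.status.loadBalancer.ingress[0].ip')
ADMIN_PASSWORD=$(kubectl get secret pmm-secret -o jsonpath='{.data.PMM_ADMIN_PASSWORD}' | base64 --decode)
# create an Admin service account named "operator" through the Grafana API exposed by PMM
SA_ID=$(curl -ks -X POST -H 'Content-Type: application/json' \
    -d '{"name":"operator","role":"Admin","isDisabled":false}' \
    --user "admin:${ADMIN_PASSWORD}" "https://${ENDPOINT}/graph/api/serviceaccounts" | jq -r '.id')
# issue a token for that service account and store it where the operator expects to find it
TOKEN=$(curl -ks -X POST -H 'Content-Type: application/json' -d '{"name":"operator"}' \
    --user "admin:${ADMIN_PASSWORD}" "https://${ENDPOINT}/graph/api/serviceaccounts/${SA_ID}/tokens" | jq -r '.key')
kubectl patch secret some-users --type merge -p "{\"stringData\":{\"PMM_SERVER_TOKEN\":\"${TOKEN}\"}}"
# -------------------------------------------------------------------------------------------------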
+++++ cat /tmp/tmp.rVqE4S3U1j +++++ cat /tmp/tmp.MpII0tpN3H +++++ rm /tmp/tmp.rVqE4S3U1j /tmp/tmp.MpII0tpN3H +++++ return 0 ++++ local ip=35.226.246.51 ++++ '[' -n 35.226.246.51 -a 35.226.246.51 '!=' null ']' ++++ echo 35.226.246.51 ++++ return +++ curl --insecure -s -X POST -H 'Content-Type: application/json' -d '{"name":"operator"}' --user 'admin:j+uB(qx7<7^ s+0@' https://35.226.246.51/graph/api/serviceaccounts/2/tokens -w '\n%{http_code}' ++ token_response='{"id":1,"name":"operator","key":"glsa_cRCojUdr1dEuCCvB53TVbPIJ95nlezgP_e2e434ce"} 200' +++ echo '{"id":1,"name":"operator","key":"glsa_cRCojUdr1dEuCCvB53TVbPIJ95nlezgP_e2e434ce"} 200' +++ tail -n1 ++ token_status_code=200 +++ echo '{"id":1,"name":"operator","key":"glsa_cRCojUdr1dEuCCvB53TVbPIJ95nlezgP_e2e434ce"} 200' +++ sed '$ d' ++ token_json_response='{"id":1,"name":"operator","key":"glsa_cRCojUdr1dEuCCvB53TVbPIJ95nlezgP_e2e434ce"}' ++ [[ 200 -ne 200 ]] ++ echo '{"id":1,"name":"operator","key":"glsa_cRCojUdr1dEuCCvB53TVbPIJ95nlezgP_e2e434ce"}' ++ jq -r .key + TOKEN=glsa_cRCojUdr1dEuCCvB53TVbPIJ95nlezgP_e2e434ce + kubectl_bin patch secret some-users --type merge --patch '{"stringData": {"PMM_SERVER_TOKEN": "glsa_cRCojUdr1dEuCCvB53TVbPIJ95nlezgP_e2e434ce"}}' ++ mktemp + local LAST_OUT=/tmp/tmp.rpNql9TGKL ++ mktemp + local LAST_ERR=/tmp/tmp.pbwJ5izE8o + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl patch secret some-users --type merge --patch '{"stringData": {"PMM_SERVER_TOKEN": "glsa_cRCojUdr1dEuCCvB53TVbPIJ95nlezgP_e2e434ce"}}' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.rpNql9TGKL secret/some-users patched + cat /tmp/tmp.pbwJ5izE8o + rm /tmp/tmp.rpNql9TGKL /tmp/tmp.pbwJ5izE8o + return 0 + desc 'check if all 3 Pods started' + set +o xtrace ----------------------------------------------------------------------------------- check if all 3 Pods started ----------------------------------------------------------------------------------- + wait_for_running monitoring-pmm3-rs0 3 + local name=monitoring-pmm3-rs0 + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=monitoring-pmm3 ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod monitoring-pmm3-rs0-0 + local pod=monitoring-pmm3-rs0-0 + set +o xtrace waiting for pod/monitoring-pmm3-rs0-0 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod monitoring-pmm3-rs0-1 + local pod=monitoring-pmm3-rs0-1 + set +o xtrace waiting for pod/monitoring-pmm3-rs0-1 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb monitoring-pmm3 -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.iUPzUvohv9 +++ mktemp ++ local LAST_ERR=/tmp/tmp.to6z85MIIr ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb monitoring-pmm3 -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.iUPzUvohv9 ++ cat /tmp/tmp.to6z85MIIr ++ rm /tmp/tmp.iUPzUvohv9 /tmp/tmp.to6z85MIIr ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod monitoring-pmm3-rs0-2 + local pod=monitoring-pmm3-rs0-2 + set +o xtrace waiting for pod/monitoring-pmm3-rs0-2 to be ready.OK ++ kubectl_bin get psmdb monitoring-pmm3 -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.aN1JnDD1S4 +++ mktemp ++ local 
LAST_ERR=/tmp/tmp.BzbREttCaL ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb monitoring-pmm3 -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.aN1JnDD1S4 ++ cat /tmp/tmp.BzbREttCaL ++ rm /tmp/tmp.aN1JnDD1S4 /tmp/tmp.BzbREttCaL ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb monitoring-pmm3 -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.89Yte4sKXy +++ mktemp ++ local LAST_ERR=/tmp/tmp.A8k4TIqWLq ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb monitoring-pmm3 -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.89Yte4sKXy ++ cat /tmp/tmp.A8k4TIqWLq ++ rm /tmp/tmp.89Yte4sKXy /tmp/tmp.A8k4TIqWLq ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness............................................................................................................................................................. + sleep 90 + desc 'check if pmm-client container enabled' + set +o xtrace ----------------------------------------------------------------------------------- check if pmm-client container enabled ----------------------------------------------------------------------------------- + compare_kubectl statefulset/monitoring-pmm3-rs0 + local resource=statefulset/monitoring-pmm3-rs0 + local postfix= + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1850/e2e-tests/monitoring-pmm3/compare/statefulset_monitoring-pmm3-rs0.yml + local new_result=/tmp/tmp.68JekPG774/statefulset_monitoring-pmm3-rs0.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1850/e2e-tests/monitoring-pmm3/compare/statefulset_monitoring-pmm3-rs0-oc.yml ']' + kubectl_bin get -o yaml statefulset/monitoring-pmm3-rs0 + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. 
| select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("monitoring-pmm3-8293", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | ++ mktemp (.. | select(. == "batch/v1beta1")) = "batch/v1" ' - + local LAST_OUT=/tmp/tmp.SLDEIDYPU4 ++ mktemp + local LAST_ERR=/tmp/tmp.mZMJMP5vaI + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get -o yaml statefulset/monitoring-pmm3-rs0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.SLDEIDYPU4 + cat /tmp/tmp.mZMJMP5vaI + rm /tmp/tmp.SLDEIDYPU4 /tmp/tmp.mZMJMP5vaI + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.68JekPG774/statefulset_monitoring-pmm3-rs0.yml + version_gt 1.22 ++ echo '1.31 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.68JekPG774/statefulset_monitoring-pmm3-rs0.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.68JekPG774/statefulset_monitoring-pmm3-rs0.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1850/e2e-tests/monitoring-pmm3/compare/statefulset_monitoring-pmm3-rs0.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1850/e2e-tests/monitoring-pmm3/compare/statefulset_monitoring-pmm3-rs0.yml /tmp/tmp.68JekPG774/statefulset_monitoring-pmm3-rs0.yml + log 'compare_kubectl: statefulset/monitoring-pmm3-rs0 OK' + set +o xtrace [2025-11-10T07:26:01+0000] compare_kubectl: statefulset/monitoring-pmm3-rs0 OK + compare_kubectl service/monitoring-pmm3-rs0 + local resource=service/monitoring-pmm3-rs0 + local postfix= + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1850/e2e-tests/monitoring-pmm3/compare/service_monitoring-pmm3-rs0.yml + local new_result=/tmp/tmp.68JekPG774/service_monitoring-pmm3-rs0.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1850/e2e-tests/monitoring-pmm3/compare/service_monitoring-pmm3-rs0-oc.yml ']' + kubectl_bin get -o yaml service/monitoring-pmm3-rs0 + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. 
| select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("monitoring-pmm3-8293", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. == "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.IYM3HGNuYt ++ mktemp + local LAST_ERR=/tmp/tmp.oUu6qhmhat + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get -o yaml service/monitoring-pmm3-rs0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.IYM3HGNuYt + cat /tmp/tmp.oUu6qhmhat + rm /tmp/tmp.IYM3HGNuYt /tmp/tmp.oUu6qhmhat + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.68JekPG774/service_monitoring-pmm3-rs0.yml + version_gt 1.22 ++ echo '1.31 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.68JekPG774/service_monitoring-pmm3-rs0.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.68JekPG774/service_monitoring-pmm3-rs0.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1850/e2e-tests/monitoring-pmm3/compare/service_monitoring-pmm3-rs0.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1850/e2e-tests/monitoring-pmm3/compare/service_monitoring-pmm3-rs0.yml /tmp/tmp.68JekPG774/service_monitoring-pmm3-rs0.yml + log 'compare_kubectl: service/monitoring-pmm3-rs0 OK' + set +o xtrace [2025-11-10T07:26:02+0000] compare_kubectl: service/monitoring-pmm3-rs0 OK + compare_kubectl service/monitoring-pmm3-mongos + local resource=service/monitoring-pmm3-mongos + local postfix= + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1850/e2e-tests/monitoring-pmm3/compare/service_monitoring-pmm3-mongos.yml + local new_result=/tmp/tmp.68JekPG774/service_monitoring-pmm3-mongos.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1850/e2e-tests/monitoring-pmm3/compare/service_monitoring-pmm3-mongos-oc.yml ']' + kubectl_bin get -o yaml service/monitoring-pmm3-mongos + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. 
| select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("monitoring-pmm3-8293", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. 
== "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.kGqMNwuvt5 ++ mktemp + local LAST_ERR=/tmp/tmp.bSBZu5VGWk + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get -o yaml service/monitoring-pmm3-mongos + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.kGqMNwuvt5 + cat /tmp/tmp.bSBZu5VGWk + rm /tmp/tmp.kGqMNwuvt5 /tmp/tmp.bSBZu5VGWk + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.68JekPG774/service_monitoring-pmm3-mongos.yml + version_gt 1.22 ++ echo '1.31 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.68JekPG774/service_monitoring-pmm3-mongos.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.68JekPG774/service_monitoring-pmm3-mongos.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1850/e2e-tests/monitoring-pmm3/compare/service_monitoring-pmm3-mongos.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1850/e2e-tests/monitoring-pmm3/compare/service_monitoring-pmm3-mongos.yml /tmp/tmp.68JekPG774/service_monitoring-pmm3-mongos.yml + log 'compare_kubectl: service/monitoring-pmm3-mongos OK' + set +o xtrace [2025-11-10T07:26:03+0000] compare_kubectl: service/monitoring-pmm3-mongos OK + compare_kubectl statefulset/monitoring-pmm3-cfg + local resource=statefulset/monitoring-pmm3-cfg + local postfix= + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1850/e2e-tests/monitoring-pmm3/compare/statefulset_monitoring-pmm3-cfg.yml + local new_result=/tmp/tmp.68JekPG774/statefulset_monitoring-pmm3-cfg.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1850/e2e-tests/monitoring-pmm3/compare/statefulset_monitoring-pmm3-cfg-oc.yml ']' + kubectl_bin get -o yaml statefulset/monitoring-pmm3-cfg + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. 
| select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("monitoring-pmm3-8293", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. == "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.6vrgclce8w ++ mktemp + local LAST_ERR=/tmp/tmp.XttvSxn1hn + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get -o yaml statefulset/monitoring-pmm3-cfg + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.6vrgclce8w + cat /tmp/tmp.XttvSxn1hn + rm /tmp/tmp.6vrgclce8w /tmp/tmp.XttvSxn1hn + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.68JekPG774/statefulset_monitoring-pmm3-cfg.yml + version_gt 1.22 ++ echo '1.31 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.68JekPG774/statefulset_monitoring-pmm3-cfg.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.68JekPG774/statefulset_monitoring-pmm3-cfg.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1850/e2e-tests/monitoring-pmm3/compare/statefulset_monitoring-pmm3-cfg.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1850/e2e-tests/monitoring-pmm3/compare/statefulset_monitoring-pmm3-cfg.yml /tmp/tmp.68JekPG774/statefulset_monitoring-pmm3-cfg.yml + log 'compare_kubectl: statefulset/monitoring-pmm3-cfg OK' + set +o xtrace [2025-11-10T07:26:04+0000] compare_kubectl: statefulset/monitoring-pmm3-cfg OK + compare_kubectl statefulset/monitoring-pmm3-mongos + local resource=statefulset/monitoring-pmm3-mongos + local postfix= + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1850/e2e-tests/monitoring-pmm3/compare/statefulset_monitoring-pmm3-mongos.yml + local new_result=/tmp/tmp.68JekPG774/statefulset_monitoring-pmm3-mongos.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1850/e2e-tests/monitoring-pmm3/compare/statefulset_monitoring-pmm3-mongos-oc.yml ']' + kubectl_bin get -o yaml statefulset/monitoring-pmm3-mongos + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. 
| select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("monitoring-pmm3-8293", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. == "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.KEIpAA7PhR ++ mktemp + local LAST_ERR=/tmp/tmp.faZIxbAmC3 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get -o yaml statefulset/monitoring-pmm3-mongos + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.KEIpAA7PhR + cat /tmp/tmp.faZIxbAmC3 + rm /tmp/tmp.KEIpAA7PhR /tmp/tmp.faZIxbAmC3 + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.68JekPG774/statefulset_monitoring-pmm3-mongos.yml + version_gt 1.22 ++ echo '1.31 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.68JekPG774/statefulset_monitoring-pmm3-mongos.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.68JekPG774/statefulset_monitoring-pmm3-mongos.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1850/e2e-tests/monitoring-pmm3/compare/statefulset_monitoring-pmm3-mongos.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1850/e2e-tests/monitoring-pmm3/compare/statefulset_monitoring-pmm3-mongos.yml /tmp/tmp.68JekPG774/statefulset_monitoring-pmm3-mongos.yml + log 'compare_kubectl: statefulset/monitoring-pmm3-mongos OK' + set +o xtrace [2025-11-10T07:26:05+0000] compare_kubectl: statefulset/monitoring-pmm3-mongos OK + desc 'create new PMM token and add it to the secret' + set +o xtrace ----------------------------------------------------------------------------------- create new PMM token and add it to the secret ----------------------------------------------------------------------------------- ++ get_pmm_server_token operator_new ++ local key_name=operator_new ++ [[ -z operator_new ]] ++ local ADMIN_PASSWORD +++ kubectl get secret pmm-secret -o 'jsonpath={.data.PMM_ADMIN_PASSWORD}' +++ base64 --decode ++ ADMIN_PASSWORD='j+uB(qx7<7^ s+0@' ++ [[ -z j+uB(qx7<7^ s+0@ ]] ++ local create_response create_status_code create_json_response ++++ get_service_endpoint monitoring-service ++++ local service=monitoring-service +++++ kubectl_bin get service/monitoring-service -o json +++++ jq '.status.loadBalancer.ingress[].hostname' +++++ sed -e 's/^"//; s/"$//;' ++++++ mktemp +++++ local LAST_OUT=/tmp/tmp.VVs3uzUfQ6 ++++++ mktemp +++++ local LAST_ERR=/tmp/tmp.7VFTqEtSAB +++++ local 
exit_status=0 +++++ local timeout=4 ++++++ seq 0 2 +++++ for i in $(seq 0 2) +++++ set +e +++++ kubectl get service/monitoring-service -o json +++++ exit_status=0 +++++ set -e +++++ '[' 0 '!=' 0 -a -n 1 ']' +++++ break +++++ cat /tmp/tmp.VVs3uzUfQ6 +++++ cat /tmp/tmp.7VFTqEtSAB +++++ rm /tmp/tmp.VVs3uzUfQ6 /tmp/tmp.7VFTqEtSAB +++++ return 0 ++++ local hostname=null ++++ '[' -n null -a null '!=' null ']' +++++ kubectl_bin get service/monitoring-service -o json +++++ jq '.status.loadBalancer.ingress[].ip' +++++ sed -e 's/^"//; s/"$//;' ++++++ mktemp +++++ local LAST_OUT=/tmp/tmp.Z9CfdeK2uQ ++++++ mktemp +++++ local LAST_ERR=/tmp/tmp.S5jyvmk9LT +++++ local exit_status=0 +++++ local timeout=4 ++++++ seq 0 2 +++++ for i in $(seq 0 2) +++++ set +e +++++ kubectl get service/monitoring-service -o json +++++ exit_status=0 +++++ set -e +++++ '[' 0 '!=' 0 -a -n 1 ']' +++++ break +++++ cat /tmp/tmp.Z9CfdeK2uQ +++++ cat /tmp/tmp.S5jyvmk9LT +++++ rm /tmp/tmp.Z9CfdeK2uQ /tmp/tmp.S5jyvmk9LT +++++ return 0 ++++ local ip=35.226.246.51 ++++ '[' -n 35.226.246.51 -a 35.226.246.51 '!=' null ']' ++++ echo 35.226.246.51 ++++ return +++ curl --insecure -s -X POST -H 'Content-Type: application/json' -H 'Accept: application/json' -d '{"name":"operator_new", "role":"Admin", "isDisabled":false}' --user 'admin:j+uB(qx7<7^ s+0@' https://35.226.246.51/graph/api/serviceaccounts -w '\n%{http_code}' ++ create_response='{"id":3,"uid":"ef3oik0lox14we","name":"operator_new","login":"sa-1-operator_new","orgId":1,"isDisabled":false,"role":"Admin","tokens":0,"avatarUrl":""} 201' +++ echo '{"id":3,"uid":"ef3oik0lox14we","name":"operator_new","login":"sa-1-operator_new","orgId":1,"isDisabled":false,"role":"Admin","tokens":0,"avatarUrl":""} 201' +++ tail -n1 ++ create_status_code=201 +++ echo '{"id":3,"uid":"ef3oik0lox14we","name":"operator_new","login":"sa-1-operator_new","orgId":1,"isDisabled":false,"role":"Admin","tokens":0,"avatarUrl":""} 201' +++ sed '$ d' ++ create_json_response='{"id":3,"uid":"ef3oik0lox14we","name":"operator_new","login":"sa-1-operator_new","orgId":1,"isDisabled":false,"role":"Admin","tokens":0,"avatarUrl":""}' ++ [[ 201 -ne 201 ]] ++ local service_account_id +++ echo '{"id":3,"uid":"ef3oik0lox14we","name":"operator_new","login":"sa-1-operator_new","orgId":1,"isDisabled":false,"role":"Admin","tokens":0,"avatarUrl":""}' +++ jq -r .id ++ service_account_id=3 ++ [[ -z 3 ]] ++ [[ 3 == \n\u\l\l ]] ++ local token_response token_status_code token_json_response ++++ get_service_endpoint monitoring-service ++++ local service=monitoring-service +++++ kubectl_bin get service/monitoring-service -o json +++++ jq '.status.loadBalancer.ingress[].hostname' +++++ sed -e 's/^"//; s/"$//;' ++++++ mktemp +++++ local LAST_OUT=/tmp/tmp.D58y44Erth ++++++ mktemp +++++ local LAST_ERR=/tmp/tmp.2PSFVzagdY +++++ local exit_status=0 +++++ local timeout=4 ++++++ seq 0 2 +++++ for i in $(seq 0 2) +++++ set +e +++++ kubectl get service/monitoring-service -o json +++++ exit_status=0 +++++ set -e +++++ '[' 0 '!=' 0 -a -n 1 ']' +++++ break +++++ cat /tmp/tmp.D58y44Erth +++++ cat /tmp/tmp.2PSFVzagdY +++++ rm /tmp/tmp.D58y44Erth /tmp/tmp.2PSFVzagdY +++++ return 0 ++++ local hostname=null ++++ '[' -n null -a null '!=' null ']' +++++ jq '.status.loadBalancer.ingress[].ip' +++++ sed -e 's/^"//; s/"$//;' +++++ kubectl_bin get service/monitoring-service -o json ++++++ mktemp +++++ local LAST_OUT=/tmp/tmp.8JSDRXJPkU ++++++ mktemp +++++ local LAST_ERR=/tmp/tmp.fZGGiW5egJ +++++ local exit_status=0 +++++ local timeout=4 ++++++ seq 0 2 +++++ for i in 
$(seq 0 2) +++++ set +e +++++ kubectl get service/monitoring-service -o json +++++ exit_status=0 +++++ set -e +++++ '[' 0 '!=' 0 -a -n 1 ']' +++++ break +++++ cat /tmp/tmp.8JSDRXJPkU +++++ cat /tmp/tmp.fZGGiW5egJ +++++ rm /tmp/tmp.8JSDRXJPkU /tmp/tmp.fZGGiW5egJ +++++ return 0 ++++ local ip=35.226.246.51 ++++ '[' -n 35.226.246.51 -a 35.226.246.51 '!=' null ']' ++++ echo 35.226.246.51 ++++ return +++ curl --insecure -s -X POST -H 'Content-Type: application/json' -d '{"name":"operator_new"}' --user 'admin:j+uB(qx7<7^ s+0@' https://35.226.246.51/graph/api/serviceaccounts/3/tokens -w '\n%{http_code}' ++ token_response='{"id":2,"name":"operator_new","key":"glsa_1vMWWGBUNG0rUmcHt773T0IxjprwL54g_e14e1b33"} 200' +++ echo '{"id":2,"name":"operator_new","key":"glsa_1vMWWGBUNG0rUmcHt773T0IxjprwL54g_e14e1b33"} 200' +++ tail -n1 ++ token_status_code=200 +++ echo '{"id":2,"name":"operator_new","key":"glsa_1vMWWGBUNG0rUmcHt773T0IxjprwL54g_e14e1b33"} 200' +++ sed '$ d' ++ token_json_response='{"id":2,"name":"operator_new","key":"glsa_1vMWWGBUNG0rUmcHt773T0IxjprwL54g_e14e1b33"}' ++ [[ 200 -ne 200 ]] ++ echo '{"id":2,"name":"operator_new","key":"glsa_1vMWWGBUNG0rUmcHt773T0IxjprwL54g_e14e1b33"}' ++ jq -r .key + NEW_TOKEN=glsa_1vMWWGBUNG0rUmcHt773T0IxjprwL54g_e14e1b33 + kubectl_bin patch secret some-users --type merge --patch '{"stringData": {"PMM_SERVER_TOKEN": "glsa_1vMWWGBUNG0rUmcHt773T0IxjprwL54g_e14e1b33"}}' ++ mktemp + local LAST_OUT=/tmp/tmp.fwiSkbU4Fb ++ mktemp + local LAST_ERR=/tmp/tmp.FudyGPzztY + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl patch secret some-users --type merge --patch '{"stringData": {"PMM_SERVER_TOKEN": "glsa_1vMWWGBUNG0rUmcHt773T0IxjprwL54g_e14e1b33"}}' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.fwiSkbU4Fb secret/some-users patched + cat /tmp/tmp.FudyGPzztY + rm /tmp/tmp.fwiSkbU4Fb /tmp/tmp.FudyGPzztY + return 0 + desc 'delete old PMM token' + set +o xtrace ----------------------------------------------------------------------------------- delete old PMM token ----------------------------------------------------------------------------------- + delete_pmm_server_token operator + local key_name=operator + [[ -z operator ]] + local ADMIN_PASSWORD ++ kubectl get secret pmm-secret -o 'jsonpath={.data.PMM_ADMIN_PASSWORD}' ++ base64 --decode + ADMIN_PASSWORD='j+uB(qx7<7^ s+0@' + [[ -z j+uB(qx7<7^ s+0@ ]] + local 'user_credentials=admin:j+uB(qx7<7^ s+0@' + local service_accounts_response service_accounts_status +++ get_service_endpoint monitoring-service +++ local service=monitoring-service ++++ kubectl_bin get service/monitoring-service -o json ++++ jq '.status.loadBalancer.ingress[].hostname' ++++ sed -e 's/^"//; s/"$//;' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.0nqwzvvcSm +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.qwedISGARk ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o json ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.0nqwzvvcSm ++++ cat /tmp/tmp.qwedISGARk ++++ rm /tmp/tmp.0nqwzvvcSm /tmp/tmp.qwedISGARk ++++ return 0 +++ local hostname=null +++ '[' -n null -a null '!=' null ']' ++++ kubectl_bin get service/monitoring-service -o json ++++ jq '.status.loadBalancer.ingress[].ip' ++++ sed -e 's/^"//; s/"$//;' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.nMSlXygC8H +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.JJodbF74Ux ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 
2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o json ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.nMSlXygC8H ++++ cat /tmp/tmp.JJodbF74Ux ++++ rm /tmp/tmp.nMSlXygC8H /tmp/tmp.JJodbF74Ux ++++ return 0 +++ local ip=35.226.246.51 +++ '[' -n 35.226.246.51 -a 35.226.246.51 '!=' null ']' +++ echo 35.226.246.51 +++ return ++ curl --insecure -s -X GET --user 'admin:j+uB(qx7<7^ s+0@' https://35.226.246.51/graph/api/serviceaccounts/search -w '\n%{http_code}' + service_accounts_response='{"totalCount":2,"serviceAccounts":[{"id":2,"uid":"df3ohxkpdoef4c","name":"operator","login":"sa-1-operator","orgId":1,"isDisabled":false,"role":"Admin","tokens":1,"avatarUrl":"/graph/avatar/f741e7abdd8def4ed30a37f602429df3"},{"id":3,"uid":"ef3oik0lox14we","name":"operator_new","login":"sa-1-operator_new","orgId":1,"isDisabled":false,"role":"Admin","tokens":1,"avatarUrl":"/graph/avatar/6d1b2db67a8f3e06db4ae076406fece5"}],"page":1,"perPage":1000} 200' ++ echo '{"totalCount":2,"serviceAccounts":[{"id":2,"uid":"df3ohxkpdoef4c","name":"operator","login":"sa-1-operator","orgId":1,"isDisabled":false,"role":"Admin","tokens":1,"avatarUrl":"/graph/avatar/f741e7abdd8def4ed30a37f602429df3"},{"id":3,"uid":"ef3oik0lox14we","name":"operator_new","login":"sa-1-operator_new","orgId":1,"isDisabled":false,"role":"Admin","tokens":1,"avatarUrl":"/graph/avatar/6d1b2db67a8f3e06db4ae076406fece5"}],"page":1,"perPage":1000} 200' ++ tail -n1 + service_accounts_status=200 ++ echo '{"totalCount":2,"serviceAccounts":[{"id":2,"uid":"df3ohxkpdoef4c","name":"operator","login":"sa-1-operator","orgId":1,"isDisabled":false,"role":"Admin","tokens":1,"avatarUrl":"/graph/avatar/f741e7abdd8def4ed30a37f602429df3"},{"id":3,"uid":"ef3oik0lox14we","name":"operator_new","login":"sa-1-operator_new","orgId":1,"isDisabled":false,"role":"Admin","tokens":1,"avatarUrl":"/graph/avatar/6d1b2db67a8f3e06db4ae076406fece5"}],"page":1,"perPage":1000} 200' ++ sed '$ d' + service_accounts_json='{"totalCount":2,"serviceAccounts":[{"id":2,"uid":"df3ohxkpdoef4c","name":"operator","login":"sa-1-operator","orgId":1,"isDisabled":false,"role":"Admin","tokens":1,"avatarUrl":"/graph/avatar/f741e7abdd8def4ed30a37f602429df3"},{"id":3,"uid":"ef3oik0lox14we","name":"operator_new","login":"sa-1-operator_new","orgId":1,"isDisabled":false,"role":"Admin","tokens":1,"avatarUrl":"/graph/avatar/6d1b2db67a8f3e06db4ae076406fece5"}],"page":1,"perPage":1000}' + [[ 200 -ne 200 ]] + local service_account_id ++ echo '{"totalCount":2,"serviceAccounts":[{"id":2,"uid":"df3ohxkpdoef4c","name":"operator","login":"sa-1-operator","orgId":1,"isDisabled":false,"role":"Admin","tokens":1,"avatarUrl":"/graph/avatar/f741e7abdd8def4ed30a37f602429df3"},{"id":3,"uid":"ef3oik0lox14we","name":"operator_new","login":"sa-1-operator_new","orgId":1,"isDisabled":false,"role":"Admin","tokens":1,"avatarUrl":"/graph/avatar/6d1b2db67a8f3e06db4ae076406fece5"}],"page":1,"perPage":1000}' ++ jq -r '.serviceAccounts[] | select(.name == "operator").id' + service_account_id=2 + [[ -z 2 ]] + [[ 2 == \n\u\l\l ]] + local tokens_response tokens_status tokens_json +++ get_service_endpoint monitoring-service +++ local service=monitoring-service ++++ kubectl_bin get service/monitoring-service -o json ++++ sed -e 's/^"//; s/"$//;' ++++ jq '.status.loadBalancer.ingress[].hostname' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.VLjHz32Ojm +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.HSCbGKJRIy ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 
0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o json ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.VLjHz32Ojm ++++ cat /tmp/tmp.HSCbGKJRIy ++++ rm /tmp/tmp.VLjHz32Ojm /tmp/tmp.HSCbGKJRIy ++++ return 0 +++ local hostname=null +++ '[' -n null -a null '!=' null ']' ++++ kubectl_bin get service/monitoring-service -o json ++++ jq '.status.loadBalancer.ingress[].ip' ++++ sed -e 's/^"//; s/"$//;' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.dgYcAXWaoQ +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.4r82A0tsVL ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o json ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.dgYcAXWaoQ ++++ cat /tmp/tmp.4r82A0tsVL ++++ rm /tmp/tmp.dgYcAXWaoQ /tmp/tmp.4r82A0tsVL ++++ return 0 +++ local ip=35.226.246.51 +++ '[' -n 35.226.246.51 -a 35.226.246.51 '!=' null ']' +++ echo 35.226.246.51 +++ return ++ curl --insecure -s -X GET --user 'admin:j+uB(qx7<7^ s+0@' https://35.226.246.51/graph/api/serviceaccounts/2/tokens -w '\n%{http_code}' + tokens_response='[{"id":1,"name":"operator","created":"2025-11-10T07:19:11Z","lastUsedAt":"2025-11-10T07:24:45Z","expiration":null,"secondsUntilExpiration":0,"hasExpired":false,"isRevoked":false}] 200' ++ echo '[{"id":1,"name":"operator","created":"2025-11-10T07:19:11Z","lastUsedAt":"2025-11-10T07:24:45Z","expiration":null,"secondsUntilExpiration":0,"hasExpired":false,"isRevoked":false}] 200' ++ tail -n1 + tokens_status=200 ++ echo '[{"id":1,"name":"operator","created":"2025-11-10T07:19:11Z","lastUsedAt":"2025-11-10T07:24:45Z","expiration":null,"secondsUntilExpiration":0,"hasExpired":false,"isRevoked":false}] 200' ++ sed '$ d' + tokens_json='[{"id":1,"name":"operator","created":"2025-11-10T07:19:11Z","lastUsedAt":"2025-11-10T07:24:45Z","expiration":null,"secondsUntilExpiration":0,"hasExpired":false,"isRevoked":false}]' + [[ 200 -ne 200 ]] + local token_id ++ echo '[{"id":1,"name":"operator","created":"2025-11-10T07:19:11Z","lastUsedAt":"2025-11-10T07:24:45Z","expiration":null,"secondsUntilExpiration":0,"hasExpired":false,"isRevoked":false}]' ++ jq -r '.[] | select(.name == "operator").id' + token_id=1 + [[ -z 1 ]] + [[ 1 == \n\u\l\l ]] + local delete_response delete_status +++ get_service_endpoint monitoring-service +++ local service=monitoring-service ++++ kubectl_bin get service/monitoring-service -o json ++++ jq '.status.loadBalancer.ingress[].hostname' ++++ sed -e 's/^"//; s/"$//;' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.s3XA9yta9o +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.jCPYt88hgE ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o json ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.s3XA9yta9o ++++ cat /tmp/tmp.jCPYt88hgE ++++ rm /tmp/tmp.s3XA9yta9o /tmp/tmp.jCPYt88hgE ++++ return 0 +++ local hostname=null +++ '[' -n null -a null '!=' null ']' ++++ kubectl_bin get service/monitoring-service -o json ++++ jq '.status.loadBalancer.ingress[].ip' ++++ sed -e 's/^"//; s/"$//;' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.nSBkKcLkfD +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.tFkDCQM8B7 ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o json ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat 
/tmp/tmp.nSBkKcLkfD ++++ cat /tmp/tmp.tFkDCQM8B7 ++++ rm /tmp/tmp.nSBkKcLkfD /tmp/tmp.tFkDCQM8B7 ++++ return 0 +++ local ip=35.226.246.51 +++ '[' -n 35.226.246.51 -a 35.226.246.51 '!=' null ']' +++ echo 35.226.246.51 +++ return ++ curl --insecure -s -X DELETE --user 'admin:j+uB(qx7<7^ s+0@' https://35.226.246.51/graph/api/serviceaccounts/2/tokens/1 -w '\n%{http_code}' + delete_response='{"message":"Service account token deleted"} 200' ++ tail -n1 ++ echo '{"message":"Service account token deleted"} 200' + delete_status=200 + [[ 200 -ne 200 ]] + desc 'check for authentication errors in PMM client logs' + set +o xtrace ----------------------------------------------------------------------------------- check for authentication errors in PMM client logs ----------------------------------------------------------------------------------- ++ kubectl get pods --selector=app.kubernetes.io/replset=rs0 -o 'jsonpath={.items[*].metadata.name}' + pods='monitoring-pmm3-rs0-0 monitoring-pmm3-rs0-1 monitoring-pmm3-rs0-2' + for pod in $pods + for i in {1..3} + kubectl logs monitoring-pmm3-rs0-0 pmm-client + grep -q 'Invalid username or password' + sleep 2 + for i in {1..3} + kubectl logs monitoring-pmm3-rs0-0 pmm-client + grep -q 'Invalid username or password' + sleep 2 + for i in {1..3} + kubectl logs monitoring-pmm3-rs0-0 pmm-client + grep -q 'Invalid username or password' + sleep 2 + for pod in $pods + for i in {1..3} + kubectl logs monitoring-pmm3-rs0-1 pmm-client + grep -q 'Invalid username or password' + sleep 2 + for i in {1..3} + kubectl logs monitoring-pmm3-rs0-1 pmm-client + grep -q 'Invalid username or password' + sleep 2 + for i in {1..3} + kubectl logs monitoring-pmm3-rs0-1 pmm-client + grep -q 'Invalid username or password' + sleep 2 + for pod in $pods + for i in {1..3} + kubectl logs monitoring-pmm3-rs0-2 pmm-client + grep -q 'Invalid username or password' + sleep 2 + for i in {1..3} + kubectl logs monitoring-pmm3-rs0-2 pmm-client + grep -q 'Invalid username or password' + sleep 2 + for i in {1..3} + kubectl logs monitoring-pmm3-rs0-2 pmm-client + grep -q 'Invalid username or password' + sleep 2 + desc 'check mongod metrics' + set +o xtrace ----------------------------------------------------------------------------------- check mongod metrics ----------------------------------------------------------------------------------- + get_metric_values node_boot_time_seconds monitoring-pmm3-8293-monitoring-pmm3-rs0-1 glsa_1vMWWGBUNG0rUmcHt773T0IxjprwL54g_e14e1b33 + local metric=node_boot_time_seconds + local instance=monitoring-pmm3-8293-monitoring-pmm3-rs0-1 + local token=glsa_1vMWWGBUNG0rUmcHt773T0IxjprwL54g_e14e1b33 ++ /usr/sbin/date -u +%s -d '-1 minute' + local start=1762759550 ++ /usr/sbin/date -u +%s + local end=1762759610 ++ get_service_endpoint monitoring-service ++ local service=monitoring-service +++ jq '.status.loadBalancer.ingress[].hostname' +++ kubectl_bin get service/monitoring-service -o json +++ sed -e 's/^"//; s/"$//;' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.laik4vnpHm ++++ mktemp +++ local LAST_ERR=/tmp/tmp.vwSPApP7Ig +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.laik4vnpHm +++ cat /tmp/tmp.vwSPApP7Ig +++ rm /tmp/tmp.laik4vnpHm /tmp/tmp.vwSPApP7Ig +++ return 0 ++ local hostname=null ++ '[' -n null -a null '!=' null ']' +++ kubectl_bin get service/monitoring-service -o json +++ jq 
'.status.loadBalancer.ingress[].ip' +++ sed -e 's/^"//; s/"$//;' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.GAKn1qNVHO ++++ mktemp +++ local LAST_ERR=/tmp/tmp.PpR0flVdTw +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.GAKn1qNVHO +++ cat /tmp/tmp.PpR0flVdTw +++ rm /tmp/tmp.GAKn1qNVHO /tmp/tmp.PpR0flVdTw +++ return 0 ++ local ip=35.226.246.51 ++ '[' -n 35.226.246.51 -a 35.226.246.51 '!=' null ']' ++ echo 35.226.246.51 ++ return + local endpoint=35.226.246.51 + '[' -z node_boot_time_seconds ']' + '[' -z glsa_1vMWWGBUNG0rUmcHt773T0IxjprwL54g_e14e1b33 ']' + local wait_count=30 + local retry=0 ++ curl -s -k -H 'Authorization: Bearer glsa_1vMWWGBUNG0rUmcHt773T0IxjprwL54g_e14e1b33' 'https://35.226.246.51/graph/api/datasources/proxy/1/api/v1/query_range?query=min%28node_boot_time_seconds%7Bnode_name%3D%7E%22monitoring-pmm3-8293-monitoring-pmm3-rs0-1%22%7d%20or%20node_boot_time_seconds%7Bnode_name%3D%7E%22monitoring-pmm3-8293-monitoring-pmm3-rs0-1%22%7D%29&start=1762759550&end=1762759610&step=60' ++ jq '.data.result[0].values[][1]' ++ grep '^"[0-9]' + [[ -n "1762754239" "1762754239" ]] + get_metric_values mongodb_connections monitoring-pmm3-8293-monitoring-pmm3-rs0-1 glsa_1vMWWGBUNG0rUmcHt773T0IxjprwL54g_e14e1b33 + local metric=mongodb_connections + local instance=monitoring-pmm3-8293-monitoring-pmm3-rs0-1 + local token=glsa_1vMWWGBUNG0rUmcHt773T0IxjprwL54g_e14e1b33 ++ /usr/sbin/date -u +%s -d '-1 minute' + local start=1762759552 ++ /usr/sbin/date -u +%s + local end=1762759612 ++ get_service_endpoint monitoring-service ++ local service=monitoring-service +++ kubectl_bin get service/monitoring-service -o json +++ sed -e 's/^"//; s/"$//;' +++ jq '.status.loadBalancer.ingress[].hostname' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.J4q7YN4UcQ ++++ mktemp +++ local LAST_ERR=/tmp/tmp.3U7jnlYGNI +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.J4q7YN4UcQ +++ cat /tmp/tmp.3U7jnlYGNI +++ rm /tmp/tmp.J4q7YN4UcQ /tmp/tmp.3U7jnlYGNI +++ return 0 ++ local hostname=null ++ '[' -n null -a null '!=' null ']' +++ kubectl_bin get service/monitoring-service -o json +++ jq '.status.loadBalancer.ingress[].ip' ++++ mktemp +++ sed -e 's/^"//; s/"$//;' +++ local LAST_OUT=/tmp/tmp.hsCl4FzBNX ++++ mktemp +++ local LAST_ERR=/tmp/tmp.elh3QYOY5t +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.hsCl4FzBNX +++ cat /tmp/tmp.elh3QYOY5t +++ rm /tmp/tmp.hsCl4FzBNX /tmp/tmp.elh3QYOY5t +++ return 0 ++ local ip=35.226.246.51 ++ '[' -n 35.226.246.51 -a 35.226.246.51 '!=' null ']' ++ echo 35.226.246.51 ++ return + local endpoint=35.226.246.51 + '[' -z mongodb_connections ']' + '[' -z glsa_1vMWWGBUNG0rUmcHt773T0IxjprwL54g_e14e1b33 ']' + local wait_count=30 + local retry=0 ++ curl -s -k -H 'Authorization: Bearer glsa_1vMWWGBUNG0rUmcHt773T0IxjprwL54g_e14e1b33' 
'https://35.226.246.51/graph/api/datasources/proxy/1/api/v1/query_range?query=min%28mongodb_connections%7Bnode_name%3D%7E%22monitoring-pmm3-8293-monitoring-pmm3-rs0-1%22%7d%20or%20mongodb_connections%7Bnode_name%3D%7E%22monitoring-pmm3-8293-monitoring-pmm3-rs0-1%22%7D%29&start=1762759552&end=1762759612&step=60' ++ jq '.data.result[0].values[][1]' ++ grep '^"[0-9]' + [[ -n "0" "0" ]] + desc 'check mongo config metrics' + set +o xtrace ----------------------------------------------------------------------------------- check mongo config metrics ----------------------------------------------------------------------------------- + get_metric_values node_boot_time_seconds monitoring-pmm3-8293-monitoring-pmm3-cfg-1 glsa_1vMWWGBUNG0rUmcHt773T0IxjprwL54g_e14e1b33 + local metric=node_boot_time_seconds + local instance=monitoring-pmm3-8293-monitoring-pmm3-cfg-1 + local token=glsa_1vMWWGBUNG0rUmcHt773T0IxjprwL54g_e14e1b33 ++ /usr/sbin/date -u +%s -d '-1 minute' + local start=1762759554 ++ /usr/sbin/date -u +%s + local end=1762759614 ++ get_service_endpoint monitoring-service ++ local service=monitoring-service +++ kubectl_bin get service/monitoring-service -o json +++ jq '.status.loadBalancer.ingress[].hostname' +++ sed -e 's/^"//; s/"$//;' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.l8QAHhomQw ++++ mktemp +++ local LAST_ERR=/tmp/tmp.1p8DD5vQ0t +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.l8QAHhomQw +++ cat /tmp/tmp.1p8DD5vQ0t +++ rm /tmp/tmp.l8QAHhomQw /tmp/tmp.1p8DD5vQ0t +++ return 0 ++ local hostname=null ++ '[' -n null -a null '!=' null ']' +++ kubectl_bin get service/monitoring-service -o json +++ jq '.status.loadBalancer.ingress[].ip' +++ sed -e 's/^"//; s/"$//;' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.ssUUANvHFz ++++ mktemp +++ local LAST_ERR=/tmp/tmp.FaunvQzRDO +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.ssUUANvHFz +++ cat /tmp/tmp.FaunvQzRDO +++ rm /tmp/tmp.ssUUANvHFz /tmp/tmp.FaunvQzRDO +++ return 0 ++ local ip=35.226.246.51 ++ '[' -n 35.226.246.51 -a 35.226.246.51 '!=' null ']' ++ echo 35.226.246.51 ++ return + local endpoint=35.226.246.51 + '[' -z node_boot_time_seconds ']' + '[' -z glsa_1vMWWGBUNG0rUmcHt773T0IxjprwL54g_e14e1b33 ']' + local wait_count=30 + local retry=0 ++ jq '.data.result[0].values[][1]' ++ curl -s -k -H 'Authorization: Bearer glsa_1vMWWGBUNG0rUmcHt773T0IxjprwL54g_e14e1b33' 'https://35.226.246.51/graph/api/datasources/proxy/1/api/v1/query_range?query=min%28node_boot_time_seconds%7Bnode_name%3D%7E%22monitoring-pmm3-8293-monitoring-pmm3-cfg-1%22%7d%20or%20node_boot_time_seconds%7Bnode_name%3D%7E%22monitoring-pmm3-8293-monitoring-pmm3-cfg-1%22%7D%29&start=1762759554&end=1762759614&step=60' ++ grep '^"[0-9]' + [[ -n "1762754232" "1762754232" ]] + get_metric_values mongodb_connections monitoring-pmm3-8293-monitoring-pmm3-cfg-1 glsa_1vMWWGBUNG0rUmcHt773T0IxjprwL54g_e14e1b33 + local metric=mongodb_connections + local instance=monitoring-pmm3-8293-monitoring-pmm3-cfg-1 + local token=glsa_1vMWWGBUNG0rUmcHt773T0IxjprwL54g_e14e1b33 ++ /usr/sbin/date -u +%s -d '-1 minute' + local start=1762759556 ++ /usr/sbin/date -u +%s + local end=1762759616 ++ get_service_endpoint monitoring-service ++ local service=monitoring-service +++ 
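
# Each get_metric_values block above resolves the monitoring-service LoadBalancer address and then
# queries Prometheus through PMM's Grafana datasource proxy using a service-account token. A rough
# sketch of the same check; check_metric is a hypothetical name, and the endpoint, token and
# instance values are the ones from this run.
check_metric() {
  local metric=$1 instance=$2 token=$3 endpoint=$4
  local start end values
  start=$(date -u -d '-1 minute' +%s)
  end=$(date -u +%s)
  # min(metric{node_name=~"<instance>"}) over the last minute, 60-second step
  values=$(curl -s -k -G -H "Authorization: Bearer ${token}" \
    --data-urlencode "query=min(${metric}{node_name=~\"${instance}\"})" \
    --data-urlencode "start=${start}" \
    --data-urlencode "end=${end}" \
    --data-urlencode "step=60" \
    "https://${endpoint}/graph/api/datasources/proxy/1/api/v1/query_range" \
    | jq '.data.result[0].values[][1]' | grep '^"[0-9]' || true)
  [[ -n ${values} ]]  # pass only if at least one numeric sample came back
}
# e.g. check_metric mongodb_connections monitoring-pmm3-8293-monitoring-pmm3-rs0-1 "$TOKEN" 35.226.246.51
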
kubectl_bin get service/monitoring-service -o json +++ jq '.status.loadBalancer.ingress[].hostname' +++ sed -e 's/^"//; s/"$//;' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.3l3dGbLsju ++++ mktemp +++ local LAST_ERR=/tmp/tmp.MOQJMe5iCm +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.3l3dGbLsju +++ cat /tmp/tmp.MOQJMe5iCm +++ rm /tmp/tmp.3l3dGbLsju /tmp/tmp.MOQJMe5iCm +++ return 0 ++ local hostname=null ++ '[' -n null -a null '!=' null ']' +++ kubectl_bin get service/monitoring-service -o json +++ jq '.status.loadBalancer.ingress[].ip' +++ sed -e 's/^"//; s/"$//;' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.z9ysMbf7ab ++++ mktemp +++ local LAST_ERR=/tmp/tmp.1ytjAVrDp8 +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.z9ysMbf7ab +++ cat /tmp/tmp.1ytjAVrDp8 +++ rm /tmp/tmp.z9ysMbf7ab /tmp/tmp.1ytjAVrDp8 +++ return 0 ++ local ip=35.226.246.51 ++ '[' -n 35.226.246.51 -a 35.226.246.51 '!=' null ']' ++ echo 35.226.246.51 ++ return + local endpoint=35.226.246.51 + '[' -z mongodb_connections ']' + '[' -z glsa_1vMWWGBUNG0rUmcHt773T0IxjprwL54g_e14e1b33 ']' + local wait_count=30 + local retry=0 ++ curl -s -k -H 'Authorization: Bearer glsa_1vMWWGBUNG0rUmcHt773T0IxjprwL54g_e14e1b33' 'https://35.226.246.51/graph/api/datasources/proxy/1/api/v1/query_range?query=min%28mongodb_connections%7Bnode_name%3D%7E%22monitoring-pmm3-8293-monitoring-pmm3-cfg-1%22%7d%20or%20mongodb_connections%7Bnode_name%3D%7E%22monitoring-pmm3-8293-monitoring-pmm3-cfg-1%22%7D%29&start=1762759556&end=1762759616&step=60' ++ jq '.data.result[0].values[][1]' ++ grep '^"[0-9]' + [[ -n "0" "0" ]] + desc 'check mongos metrics' + set +o xtrace ----------------------------------------------------------------------------------- check mongos metrics ----------------------------------------------------------------------------------- ++ kubectl get pod -l app.kubernetes.io/component=mongos -o 'jsonpath={.items[0].metadata.name}' + MONGOS_POD_NAME=monitoring-pmm3-mongos-0 + get_metric_values node_boot_time_seconds monitoring-pmm3-8293-monitoring-pmm3-mongos-0 glsa_1vMWWGBUNG0rUmcHt773T0IxjprwL54g_e14e1b33 + local metric=node_boot_time_seconds + local instance=monitoring-pmm3-8293-monitoring-pmm3-mongos-0 + local token=glsa_1vMWWGBUNG0rUmcHt773T0IxjprwL54g_e14e1b33 ++ /usr/sbin/date -u +%s -d '-1 minute' + local start=1762759558 ++ /usr/sbin/date -u +%s + local end=1762759618 ++ get_service_endpoint monitoring-service ++ local service=monitoring-service +++ kubectl_bin get service/monitoring-service -o json +++ jq '.status.loadBalancer.ingress[].hostname' +++ sed -e 's/^"//; s/"$//;' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.jUlHJSqOfm ++++ mktemp +++ local LAST_ERR=/tmp/tmp.mr8yfzdO4g +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.jUlHJSqOfm +++ cat /tmp/tmp.mr8yfzdO4g +++ rm /tmp/tmp.jUlHJSqOfm /tmp/tmp.mr8yfzdO4g +++ return 0 ++ local hostname=null ++ '[' -n null -a null '!=' null ']' +++ kubectl_bin get service/monitoring-service -o json +++ jq '.status.loadBalancer.ingress[].ip' +++ sed -e 's/^"//; s/"$//;' ++++ mktemp +++ 
local LAST_OUT=/tmp/tmp.o5pcOv0EOG ++++ mktemp +++ local LAST_ERR=/tmp/tmp.8XWQ7mRS36 +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.o5pcOv0EOG +++ cat /tmp/tmp.8XWQ7mRS36 +++ rm /tmp/tmp.o5pcOv0EOG /tmp/tmp.8XWQ7mRS36 +++ return 0 ++ local ip=35.226.246.51 ++ '[' -n 35.226.246.51 -a 35.226.246.51 '!=' null ']' ++ echo 35.226.246.51 ++ return + local endpoint=35.226.246.51 + '[' -z node_boot_time_seconds ']' + '[' -z glsa_1vMWWGBUNG0rUmcHt773T0IxjprwL54g_e14e1b33 ']' + local wait_count=30 + local retry=0 ++ curl -s -k -H 'Authorization: Bearer glsa_1vMWWGBUNG0rUmcHt773T0IxjprwL54g_e14e1b33' 'https://35.226.246.51/graph/api/datasources/proxy/1/api/v1/query_range?query=min%28node_boot_time_seconds%7Bnode_name%3D%7E%22monitoring-pmm3-8293-monitoring-pmm3-mongos-0%22%7d%20or%20node_boot_time_seconds%7Bnode_name%3D%7E%22monitoring-pmm3-8293-monitoring-pmm3-mongos-0%22%7D%29&start=1762759558&end=1762759618&step=60' ++ jq '.data.result[0].values[][1]' ++ grep '^"[0-9]' + [[ -n "1762754232" "1762754232" ]] + sleep 90 + desc 'check QAN data' + set +o xtrace ----------------------------------------------------------------------------------- check QAN data ----------------------------------------------------------------------------------- + get_qan_values mongodb dev-mongod glsa_1vMWWGBUNG0rUmcHt773T0IxjprwL54g_e14e1b33 + local service_type=mongodb + local environment=dev-mongod + local token=glsa_1vMWWGBUNG0rUmcHt773T0IxjprwL54g_e14e1b33 + local start + local end + local endpoint ++ /usr/sbin/date -u -d '-12 hour' +%Y-%m-%dT%H:%M:%S%:z + start=2025-11-09T19:28:29+00:00 ++ /usr/sbin/date -u +%Y-%m-%dT%H:%M:%S%:z + end=2025-11-10T07:28:29+00:00 ++ get_service_endpoint monitoring-service ++ local service=monitoring-service +++ kubectl_bin get service/monitoring-service -o json +++ jq '.status.loadBalancer.ingress[].hostname' +++ sed -e 's/^"//; s/"$//;' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.JejQPtB09s ++++ mktemp +++ local LAST_ERR=/tmp/tmp.luQyBn19FI +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.JejQPtB09s +++ cat /tmp/tmp.luQyBn19FI +++ rm /tmp/tmp.JejQPtB09s /tmp/tmp.luQyBn19FI +++ return 0 ++ local hostname=null ++ '[' -n null -a null '!=' null ']' +++ kubectl_bin get service/monitoring-service -o json +++ jq '.status.loadBalancer.ingress[].ip' +++ sed -e 's/^"//; s/"$//;' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.6VcEaqeKup ++++ mktemp +++ local LAST_ERR=/tmp/tmp.7atSVIWHmt +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.6VcEaqeKup +++ cat /tmp/tmp.7atSVIWHmt +++ rm /tmp/tmp.6VcEaqeKup /tmp/tmp.7atSVIWHmt +++ return 0 ++ local ip=35.226.246.51 ++ '[' -n 35.226.246.51 -a 35.226.246.51 '!=' null ']' ++ echo 35.226.246.51 ++ return + endpoint=35.226.246.51 + cat + local response + local wait_count=30 + local retry=0 ++ curl -s -k -H 'Authorization: Bearer glsa_1vMWWGBUNG0rUmcHt773T0IxjprwL54g_e14e1b33' -XPOST -d @payload.json https://35.226.246.51/v1/qan/metrics:getReport ++ jq '.rows[].fingerprint' + [[ -n "TOTAL" "db.version.find({}).limit(?)" 
"db.system.version.find({\"_id\":\"?\"}).limit(?).batchSize(1)" "db.runCommand({\"$configTime\":\"?\",\"$topologyTime\":\"?\",\"batchSize\":\"?\",\"collection\":\"?\",\"getMore\":\"?\",\"maxTimeMS\":\"?\",\"term\":\"?\"})" "db.oplog.rs.find({}).sort({\"$natural\":1}).limit(?)" "db.oplog.rs.find({}).sort({\"$natural\":-1}).limit(?)" "db.system.sessions.update({\"_id\":{\"id\":\"?\",\"uid\":\"?\"}}, [{\"$set\":{\"lastUse\":\"?\"}}], {\"upsert\":true})" "db.system.sessions.deleteOne({\"_id\":{\"id\":\"?\",\"uid\":\"?\"}})" ]] + rm -f payload.json + get_qan_values mongodb dev-mongos glsa_1vMWWGBUNG0rUmcHt773T0IxjprwL54g_e14e1b33 + local service_type=mongodb + local environment=dev-mongos + local token=glsa_1vMWWGBUNG0rUmcHt773T0IxjprwL54g_e14e1b33 + local start + local end + local endpoint ++ /usr/sbin/date -u -d '-12 hour' +%Y-%m-%dT%H:%M:%S%:z + start=2025-11-09T19:28:33+00:00 ++ /usr/sbin/date -u +%Y-%m-%dT%H:%M:%S%:z + end=2025-11-10T07:28:33+00:00 ++ get_service_endpoint monitoring-service ++ local service=monitoring-service +++ kubectl_bin get service/monitoring-service -o json +++ jq '.status.loadBalancer.ingress[].hostname' +++ sed -e 's/^"//; s/"$//;' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.T5H0JfNPM5 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.9fv6LE3kpJ +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.T5H0JfNPM5 +++ cat /tmp/tmp.9fv6LE3kpJ +++ rm /tmp/tmp.T5H0JfNPM5 /tmp/tmp.9fv6LE3kpJ +++ return 0 ++ local hostname=null ++ '[' -n null -a null '!=' null ']' +++ kubectl_bin get service/monitoring-service -o json +++ jq '.status.loadBalancer.ingress[].ip' +++ sed -e 's/^"//; s/"$//;' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.1A8kQ2Lmev ++++ mktemp +++ local LAST_ERR=/tmp/tmp.PhC3OKOcIW +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.1A8kQ2Lmev +++ cat /tmp/tmp.PhC3OKOcIW +++ rm /tmp/tmp.1A8kQ2Lmev /tmp/tmp.PhC3OKOcIW +++ return 0 ++ local ip=35.226.246.51 ++ '[' -n 35.226.246.51 -a 35.226.246.51 '!=' null ']' ++ echo 35.226.246.51 ++ return + endpoint=35.226.246.51 + cat + local response + local wait_count=30 + local retry=0 ++ curl -s -k -H 'Authorization: Bearer glsa_1vMWWGBUNG0rUmcHt773T0IxjprwL54g_e14e1b33' -XPOST -d @payload.json https://35.226.246.51/v1/qan/metrics:getReport ++ jq '.rows[].fingerprint' + [[ -n "" ]] + rm -f payload.json + desc 'verify that the custom cluster name is configured' + set +o xtrace ----------------------------------------------------------------------------------- verify that the custom cluster name is configured ----------------------------------------------------------------------------------- + verify_custom_cluster_name super-custom glsa_1vMWWGBUNG0rUmcHt773T0IxjprwL54g_e14e1b33 monitoring-pmm3-8293-monitoring-pmm3-mongos-0 monitoring-pmm3-8293-monitoring-pmm3-cfg-0 monitoring-pmm3-8293-monitoring-pmm3-rs0-0 + local expected_cluster=super-custom + local token=glsa_1vMWWGBUNG0rUmcHt773T0IxjprwL54g_e14e1b33 + shift 2 + service_names=('monitoring-pmm3-8293-monitoring-pmm3-mongos-0' 'monitoring-pmm3-8293-monitoring-pmm3-cfg-0' 'monitoring-pmm3-8293-monitoring-pmm3-rs0-0') + local service_names + local endpoint ++ get_service_endpoint monitoring-service ++ local service=monitoring-service +++ kubectl_bin 
get service/monitoring-service -o json +++ jq '.status.loadBalancer.ingress[].hostname' +++ sed -e 's/^"//; s/"$//;' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.en4newoirn ++++ mktemp +++ local LAST_ERR=/tmp/tmp.mcVjmVRVky +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.en4newoirn +++ cat /tmp/tmp.mcVjmVRVky +++ rm /tmp/tmp.en4newoirn /tmp/tmp.mcVjmVRVky +++ return 0 ++ local hostname=null ++ '[' -n null -a null '!=' null ']' +++ kubectl_bin get service/monitoring-service -o json +++ jq '.status.loadBalancer.ingress[].ip' ++++ mktemp +++ sed -e 's/^"//; s/"$//;' +++ local LAST_OUT=/tmp/tmp.QFZvq4otnC ++++ mktemp +++ local LAST_ERR=/tmp/tmp.kvUaa4nH3Y +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.QFZvq4otnC +++ cat /tmp/tmp.kvUaa4nH3Y +++ rm /tmp/tmp.QFZvq4otnC /tmp/tmp.kvUaa4nH3Y +++ return 0 ++ local ip=35.226.246.51 ++ '[' -n 35.226.246.51 -a 35.226.246.51 '!=' null ']' ++ echo 35.226.246.51 ++ return + endpoint=35.226.246.51 + local response ++ curl -s -k -H 'Authorization: Bearer glsa_1vMWWGBUNG0rUmcHt773T0IxjprwL54g_e14e1b33' 'https://35.226.246.51/v1/inventory/services?service_type=SERVICE_TYPE_MONGODB_SERVICE' + response='{ "mysql": [], "mongodb": [ { "service_id": "290b9c04-cf23-464a-8601-816fb1e40be5", "service_name": "monitoring-pmm3-8293-monitoring-pmm3-rs0-2", "node_id": "3e7ec5ac-6ceb-4497-a6f9-18b53dbbe423", "address": "localhost", "port": 27019, "socket": "", "environment": "dev-mongod", "cluster": "super-custom", "replication_set": "", "custom_labels": {}, "version": "" }, { "service_id": "29cc4159-013a-4528-8913-4f42b602ca3c", "service_name": "monitoring-pmm3-8293-monitoring-pmm3-mongos-1", "node_id": "67c2cee6-b266-4c92-9ec5-3652e48cb708", "address": "localhost", "port": 27019, "socket": "", "environment": "dev-mongos", "cluster": "super-custom", "replication_set": "", "custom_labels": {}, "version": "" }, { "service_id": "3179e3d2-38b0-4c10-8ec4-ed555503674f", "service_name": "monitoring-pmm3-8293-monitoring-pmm3-mongos-2", "node_id": "8a5d5ef5-605d-48e3-b7bf-781301542e7d", "address": "localhost", "port": 27019, "socket": "", "environment": "dev-mongos", "cluster": "super-custom", "replication_set": "", "custom_labels": {}, "version": "" }, { "service_id": "62ff0263-8abe-454c-a69c-7c05c093d503", "service_name": "monitoring-pmm3-8293-monitoring-pmm3-cfg-2", "node_id": "e19a1267-0996-4648-8f85-6d0a4c5dab52", "address": "localhost", "port": 27019, "socket": "", "environment": "dev-mongod", "cluster": "super-custom", "replication_set": "", "custom_labels": {}, "version": "" }, { "service_id": "7f083025-5d83-47e5-bd9f-55b50c92b262", "service_name": "monitoring-pmm3-8293-monitoring-pmm3-rs0-1", "node_id": "90bd3f9a-f477-423a-b7ff-fd5c15b29d14", "address": "localhost", "port": 27019, "socket": "", "environment": "dev-mongod", "cluster": "super-custom", "replication_set": "", "custom_labels": {}, "version": "" }, { "service_id": "974da347-d0a4-41e1-8b32-dab75eab5eb4", "service_name": "monitoring-pmm3-8293-monitoring-pmm3-rs0-0", "node_id": "93b57028-5902-43a3-afc2-ed5e72ce1d67", "address": "localhost", "port": 27019, "socket": "", "environment": "dev-mongod", "cluster": "super-custom", "replication_set": "", "custom_labels": 
{}, "version": "" }, { "service_id": "98fb1c22-ba38-4259-98fb-2c9af7784f22", "service_name": "monitoring-pmm3-8293-monitoring-pmm3-cfg-1", "node_id": "2119bda7-5ac4-4c82-82c8-5f28e3d6363d", "address": "localhost", "port": 27019, "socket": "", "environment": "dev-mongod", "cluster": "super-custom", "replication_set": "", "custom_labels": {}, "version": "" }, { "service_id": "ab07ebf0-a39f-4859-bf87-3288dfa20430", "service_name": "monitoring-pmm3-8293-monitoring-pmm3-mongos-0", "node_id": "0057a07a-542d-4909-bf46-96fc07ebd94e", "address": "localhost", "port": 27019, "socket": "", "environment": "dev-mongos", "cluster": "super-custom", "replication_set": "", "custom_labels": {}, "version": "" }, { "service_id": "fb99726e-b3bb-4809-9fac-cee4fe7021ad", "service_name": "monitoring-pmm3-8293-monitoring-pmm3-cfg-0", "node_id": "a4816d52-69fc-4ca5-9512-f3aba309817f", "address": "localhost", "port": 27019, "socket": "", "environment": "dev-mongod", "cluster": "super-custom", "replication_set": "", "custom_labels": {}, "version": "" } ], "postgresql": [], "proxysql": [], "haproxy": [], "external": [], "valkey": [] }' + local verified=0 + for service_name in "${service_names[@]}" + local actual_cluster ++ jq -r --arg name monitoring-pmm3-8293-monitoring-pmm3-mongos-0 ' .mongodb[] | select(.service_name == $name) | .cluster ' ++ echo '{ "mysql": [], "mongodb": [ { "service_id": "290b9c04-cf23-464a-8601-816fb1e40be5", "service_name": "monitoring-pmm3-8293-monitoring-pmm3-rs0-2", "node_id": "3e7ec5ac-6ceb-4497-a6f9-18b53dbbe423", "address": "localhost", "port": 27019, "socket": "", "environment": "dev-mongod", "cluster": "super-custom", "replication_set": "", "custom_labels": {}, "version": "" }, { "service_id": "29cc4159-013a-4528-8913-4f42b602ca3c", "service_name": "monitoring-pmm3-8293-monitoring-pmm3-mongos-1", "node_id": "67c2cee6-b266-4c92-9ec5-3652e48cb708", "address": "localhost", "port": 27019, "socket": "", "environment": "dev-mongos", "cluster": "super-custom", "replication_set": "", "custom_labels": {}, "version": "" }, { "service_id": "3179e3d2-38b0-4c10-8ec4-ed555503674f", "service_name": "monitoring-pmm3-8293-monitoring-pmm3-mongos-2", "node_id": "8a5d5ef5-605d-48e3-b7bf-781301542e7d", "address": "localhost", "port": 27019, "socket": "", "environment": "dev-mongos", "cluster": "super-custom", "replication_set": "", "custom_labels": {}, "version": "" }, { "service_id": "62ff0263-8abe-454c-a69c-7c05c093d503", "service_name": "monitoring-pmm3-8293-monitoring-pmm3-cfg-2", "node_id": "e19a1267-0996-4648-8f85-6d0a4c5dab52", "address": "localhost", "port": 27019, "socket": "", "environment": "dev-mongod", "cluster": "super-custom", "replication_set": "", "custom_labels": {}, "version": "" }, { "service_id": "7f083025-5d83-47e5-bd9f-55b50c92b262", "service_name": "monitoring-pmm3-8293-monitoring-pmm3-rs0-1", "node_id": "90bd3f9a-f477-423a-b7ff-fd5c15b29d14", "address": "localhost", "port": 27019, "socket": "", "environment": "dev-mongod", "cluster": "super-custom", "replication_set": "", "custom_labels": {}, "version": "" }, { "service_id": "974da347-d0a4-41e1-8b32-dab75eab5eb4", "service_name": "monitoring-pmm3-8293-monitoring-pmm3-rs0-0", "node_id": "93b57028-5902-43a3-afc2-ed5e72ce1d67", "address": "localhost", "port": 27019, "socket": "", "environment": "dev-mongod", "cluster": "super-custom", "replication_set": "", "custom_labels": {}, "version": "" }, { "service_id": "98fb1c22-ba38-4259-98fb-2c9af7784f22", "service_name": "monitoring-pmm3-8293-monitoring-pmm3-cfg-1", "node_id": 
"2119bda7-5ac4-4c82-82c8-5f28e3d6363d", "address": "localhost", "port": 27019, "socket": "", "environment": "dev-mongod", "cluster": "super-custom", "replication_set": "", "custom_labels": {}, "version": "" }, { "service_id": "ab07ebf0-a39f-4859-bf87-3288dfa20430", "service_name": "monitoring-pmm3-8293-monitoring-pmm3-mongos-0", "node_id": "0057a07a-542d-4909-bf46-96fc07ebd94e", "address": "localhost", "port": 27019, "socket": "", "environment": "dev-mongos", "cluster": "super-custom", "replication_set": "", "custom_labels": {}, "version": "" }, { "service_id": "fb99726e-b3bb-4809-9fac-cee4fe7021ad", "service_name": "monitoring-pmm3-8293-monitoring-pmm3-cfg-0", "node_id": "a4816d52-69fc-4ca5-9512-f3aba309817f", "address": "localhost", "port": 27019, "socket": "", "environment": "dev-mongod", "cluster": "super-custom", "replication_set": "", "custom_labels": {}, "version": "" } ], "postgresql": [], "proxysql": [], "haproxy": [], "external": [], "valkey": [] }' + actual_cluster=super-custom + [[ -z super-custom ]] + [[ super-custom == \n\u\l\l ]] + [[ super-custom != \s\u\p\e\r\-\c\u\s\t\o\m ]] + for service_name in "${service_names[@]}" + local actual_cluster ++ echo '{ "mysql": [], "mongodb": [ { "service_id": "290b9c04-cf23-464a-8601-816fb1e40be5", "service_name": "monitoring-pmm3-8293-monitoring-pmm3-rs0-2", "node_id": "3e7ec5ac-6ceb-4497-a6f9-18b53dbbe423", "address": "localhost", "port": 27019, "socket": "", "environment": "dev-mongod", "cluster": "super-custom", "replication_set": "", "custom_labels": {}, "version": "" }, { "service_id": "29cc4159-013a-4528-8913-4f42b602ca3c", "service_name": "monitoring-pmm3-8293-monitoring-pmm3-mongos-1", "node_id": "67c2cee6-b266-4c92-9ec5-3652e48cb708", "address": "localhost", "port": 27019, "socket": "", "environment": "dev-mongos", "cluster": "super-custom", "replication_set": "", "custom_labels": {}, "version": "" }, { "service_id": "3179e3d2-38b0-4c10-8ec4-ed555503674f", "service_name": "monitoring-pmm3-8293-monitoring-pmm3-mongos-2", "node_id": "8a5d5ef5-605d-48e3-b7bf-781301542e7d", "address": "localhost", "port": 27019, "socket": "", "environment": "dev-mongos", "cluster": "super-custom", "replication_set": "", "custom_labels": {}, "version": "" }, { "service_id": "62ff0263-8abe-454c-a69c-7c05c093d503", "service_name": "monitoring-pmm3-8293-monitoring-pmm3-cfg-2", "node_id": "e19a1267-0996-4648-8f85-6d0a4c5dab52", "address": "localhost", "port": 27019, "socket": "", "environment": "dev-mongod", "cluster": "super-custom", "replication_set": "", "custom_labels": {}, "version": "" }, { "service_id": "7f083025-5d83-47e5-bd9f-55b50c92b262", "service_name": "monitoring-pmm3-8293-monitoring-pmm3-rs0-1", "node_id": "90bd3f9a-f477-423a-b7ff-fd5c15b29d14", "address": "localhost", "port": 27019, "socket": "", "environment": "dev-mongod", "cluster": "super-custom", "replication_set": "", "custom_labels": {}, "version": "" }, { "service_id": "974da347-d0a4-41e1-8b32-dab75eab5eb4", "service_name": "monitoring-pmm3-8293-monitoring-pmm3-rs0-0", "node_id": "93b57028-5902-43a3-afc2-ed5e72ce1d67", "address": "localhost", "port": 27019, "socket": "", "environment": "dev-mongod", "cluster": "super-custom", "replication_set": "", "custom_labels": {}, "version": "" }, { "service_id": "98fb1c22-ba38-4259-98fb-2c9af7784f22", "service_name": "monitoring-pmm3-8293-monitoring-pmm3-cfg-1", "node_id": "2119bda7-5ac4-4c82-82c8-5f28e3d6363d", "address": "localhost", "port": 27019, "socket": "", "environment": "dev-mongod", "cluster": "super-custom", "replication_set": "", 
"custom_labels": {}, "version": "" }, { "service_id": "ab07ebf0-a39f-4859-bf87-3288dfa20430", "service_name": "monitoring-pmm3-8293-monitoring-pmm3-mongos-0", "node_id": "0057a07a-542d-4909-bf46-96fc07ebd94e", "address": "localhost", "port": 27019, "socket": "", "environment": "dev-mongos", "cluster": "super-custom", "replication_set": "", "custom_labels": {}, "version": "" }, { "service_id": "fb99726e-b3bb-4809-9fac-cee4fe7021ad", "service_name": "monitoring-pmm3-8293-monitoring-pmm3-cfg-0", "node_id": "a4816d52-69fc-4ca5-9512-f3aba309817f", "address": "localhost", "port": 27019, "socket": "", "environment": "dev-mongod", "cluster": "super-custom", "replication_set": "", "custom_labels": {}, "version": "" } ], "postgresql": [], "proxysql": [], "haproxy": [], "external": [], "valkey": [] }' ++ jq -r --arg name monitoring-pmm3-8293-monitoring-pmm3-cfg-0 ' .mongodb[] | select(.service_name == $name) | .cluster ' + actual_cluster=super-custom + [[ -z super-custom ]] + [[ super-custom == \n\u\l\l ]] + [[ super-custom != \s\u\p\e\r\-\c\u\s\t\o\m ]] + for service_name in "${service_names[@]}" + local actual_cluster ++ echo '{ "mysql": [], "mongodb": [ { "service_id": "290b9c04-cf23-464a-8601-816fb1e40be5", "service_name": "monitoring-pmm3-8293-monitoring-pmm3-rs0-2", "node_id": "3e7ec5ac-6ceb-4497-a6f9-18b53dbbe423", "address": "localhost", "port": 27019, "socket": "", "environment": "dev-mongod", "cluster": "super-custom", "replication_set": "", "custom_labels": {}, "version": "" }, { "service_id": "29cc4159-013a-4528-8913-4f42b602ca3c", "service_name": "monitoring-pmm3-8293-monitoring-pmm3-mongos-1", "node_id": "67c2cee6-b266-4c92-9ec5-3652e48cb708", "address": "localhost", "port": 27019, "socket": "", "environment": "dev-mongos", "cluster": "super-custom", "replication_set": "", "custom_labels": {}, "version": "" }, { "service_id": "3179e3d2-38b0-4c10-8ec4-ed555503674f", "service_name": "monitoring-pmm3-8293-monitoring-pmm3-mongos-2", "node_id": "8a5d5ef5-605d-48e3-b7bf-781301542e7d", "address": "localhost", "port": 27019, "socket": "", "environment": "dev-mongos", "cluster": "super-custom", "replication_set": "", "custom_labels": {}, "version": "" }, { "service_id": "62ff0263-8abe-454c-a69c-7c05c093d503", "service_name": "monitoring-pmm3-8293-monitoring-pmm3-cfg-2", "node_id": "e19a1267-0996-4648-8f85-6d0a4c5dab52", "address": "localhost", "port": 27019, "socket": "", "environment": "dev-mongod", "cluster": "super-custom", "replication_set": "", "custom_labels": {}, "version": "" }, { "service_id": "7f083025-5d83-47e5-bd9f-55b50c92b262", "service_name": "monitoring-pmm3-8293-monitoring-pmm3-rs0-1", "node_id": "90bd3f9a-f477-423a-b7ff-fd5c15b29d14", "address": "localhost", "port": 27019, "socket": "", "environment": "dev-mongod", "cluster": "super-custom", "replication_set": "", "custom_labels": {}, "version": "" }, { "service_id": "974da347-d0a4-41e1-8b32-dab75eab5eb4", "service_name": "monitoring-pmm3-8293-monitoring-pmm3-rs0-0", "node_id": "93b57028-5902-43a3-afc2-ed5e72ce1d67", "address": "localhost", "port": 27019, "socket": "", "environment": "dev-mongod", "cluster": "super-custom", "replication_set": "", "custom_labels": {}, "version": "" }, { "service_id": "98fb1c22-ba38-4259-98fb-2c9af7784f22", "service_name": "monitoring-pmm3-8293-monitoring-pmm3-cfg-1", "node_id": "2119bda7-5ac4-4c82-82c8-5f28e3d6363d", "address": "localhost", "port": 27019, "socket": "", "environment": "dev-mongod", "cluster": "super-custom", "replication_set": "", "custom_labels": {}, "version": "" }, { 
"service_id": "ab07ebf0-a39f-4859-bf87-3288dfa20430", "service_name": "monitoring-pmm3-8293-monitoring-pmm3-mongos-0", "node_id": "0057a07a-542d-4909-bf46-96fc07ebd94e", "address": "localhost", "port": 27019, "socket": "", "environment": "dev-mongos", "cluster": "super-custom", "replication_set": "", "custom_labels": {}, "version": "" }, { "service_id": "fb99726e-b3bb-4809-9fac-cee4fe7021ad", "service_name": "monitoring-pmm3-8293-monitoring-pmm3-cfg-0", "node_id": "a4816d52-69fc-4ca5-9512-f3aba309817f", "address": "localhost", "port": 27019, "socket": "", "environment": "dev-mongod", "cluster": "super-custom", "replication_set": "", "custom_labels": {}, "version": "" } ], "postgresql": [], "proxysql": [], "haproxy": [], "external": [], "valkey": [] }' ++ jq -r --arg name monitoring-pmm3-8293-monitoring-pmm3-rs0-0 ' .mongodb[] | select(.service_name == $name) | .cluster ' + actual_cluster=super-custom + [[ -z super-custom ]] + [[ super-custom == \n\u\l\l ]] + [[ super-custom != \s\u\p\e\r\-\c\u\s\t\o\m ]] + return 0 + nodeList=($(get_node_id_from_pmm)) ++ get_node_id_from_pmm ++ nodeList=() ++ local -a nodeList +++ kubectl_bin get pods --no-headers -l app.kubernetes.io/name=percona-server-mongodb --output=custom-columns=NAME:.metadata.name ++++ mktemp +++ local LAST_OUT=/tmp/tmp.EMCQpx8KPl ++++ mktemp +++ local LAST_ERR=/tmp/tmp.qHoO84blXq +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get pods --no-headers -l app.kubernetes.io/name=percona-server-mongodb --output=custom-columns=NAME:.metadata.name +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.EMCQpx8KPl +++ cat /tmp/tmp.qHoO84blXq +++ rm /tmp/tmp.EMCQpx8KPl /tmp/tmp.qHoO84blXq +++ return 0 ++ for instance in $(kubectl_bin get pods --no-headers -l app.kubernetes.io/name=percona-server-mongodb --output=custom-columns='NAME:.metadata.name') ++ nodeList+=($(kubectl_bin exec -n "$namespace" $instance -c pmm-client -- pmm-admin status --json | jq -r '.pmm_agent_status.node_id')) +++ kubectl_bin exec -n monitoring-pmm3-8293 monitoring-pmm3-cfg-0 -c pmm-client -- pmm-admin status --json +++ jq -r .pmm_agent_status.node_id ++++ mktemp +++ local LAST_OUT=/tmp/tmp.oVeD2xoalO ++++ mktemp +++ local LAST_ERR=/tmp/tmp.fbLe6o4NVD +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec -n monitoring-pmm3-8293 monitoring-pmm3-cfg-0 -c pmm-client -- pmm-admin status --json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.oVeD2xoalO +++ cat /tmp/tmp.fbLe6o4NVD +++ rm /tmp/tmp.oVeD2xoalO /tmp/tmp.fbLe6o4NVD +++ return 0 ++ for instance in $(kubectl_bin get pods --no-headers -l app.kubernetes.io/name=percona-server-mongodb --output=custom-columns='NAME:.metadata.name') ++ nodeList+=($(kubectl_bin exec -n "$namespace" $instance -c pmm-client -- pmm-admin status --json | jq -r '.pmm_agent_status.node_id')) +++ kubectl_bin exec -n monitoring-pmm3-8293 monitoring-pmm3-cfg-1 -c pmm-client -- pmm-admin status --json +++ jq -r .pmm_agent_status.node_id ++++ mktemp +++ local LAST_OUT=/tmp/tmp.RS3PZw8AZ8 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.ZOuYz3Oi6y +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec -n monitoring-pmm3-8293 monitoring-pmm3-cfg-1 -c pmm-client -- pmm-admin status --json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.RS3PZw8AZ8 +++ cat /tmp/tmp.ZOuYz3Oi6y +++ rm 
/tmp/tmp.RS3PZw8AZ8 /tmp/tmp.ZOuYz3Oi6y +++ return 0 ++ for instance in $(kubectl_bin get pods --no-headers -l app.kubernetes.io/name=percona-server-mongodb --output=custom-columns='NAME:.metadata.name') ++ nodeList+=($(kubectl_bin exec -n "$namespace" $instance -c pmm-client -- pmm-admin status --json | jq -r '.pmm_agent_status.node_id')) +++ kubectl_bin exec -n monitoring-pmm3-8293 monitoring-pmm3-cfg-2 -c pmm-client -- pmm-admin status --json +++ jq -r .pmm_agent_status.node_id ++++ mktemp +++ local LAST_OUT=/tmp/tmp.4gzf8Y4JR1 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.OVMmFZi11S +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec -n monitoring-pmm3-8293 monitoring-pmm3-cfg-2 -c pmm-client -- pmm-admin status --json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.4gzf8Y4JR1 +++ cat /tmp/tmp.OVMmFZi11S +++ rm /tmp/tmp.4gzf8Y4JR1 /tmp/tmp.OVMmFZi11S +++ return 0 ++ for instance in $(kubectl_bin get pods --no-headers -l app.kubernetes.io/name=percona-server-mongodb --output=custom-columns='NAME:.metadata.name') ++ nodeList+=($(kubectl_bin exec -n "$namespace" $instance -c pmm-client -- pmm-admin status --json | jq -r '.pmm_agent_status.node_id')) +++ kubectl_bin exec -n monitoring-pmm3-8293 monitoring-pmm3-mongos-0 -c pmm-client -- pmm-admin status --json +++ jq -r .pmm_agent_status.node_id ++++ mktemp +++ local LAST_OUT=/tmp/tmp.SB1CKUmH8K ++++ mktemp +++ local LAST_ERR=/tmp/tmp.VFgfTB3DyV +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec -n monitoring-pmm3-8293 monitoring-pmm3-mongos-0 -c pmm-client -- pmm-admin status --json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.SB1CKUmH8K +++ cat /tmp/tmp.VFgfTB3DyV +++ rm /tmp/tmp.SB1CKUmH8K /tmp/tmp.VFgfTB3DyV +++ return 0 ++ for instance in $(kubectl_bin get pods --no-headers -l app.kubernetes.io/name=percona-server-mongodb --output=custom-columns='NAME:.metadata.name') ++ nodeList+=($(kubectl_bin exec -n "$namespace" $instance -c pmm-client -- pmm-admin status --json | jq -r '.pmm_agent_status.node_id')) +++ jq -r .pmm_agent_status.node_id +++ kubectl_bin exec -n monitoring-pmm3-8293 monitoring-pmm3-mongos-1 -c pmm-client -- pmm-admin status --json ++++ mktemp +++ local LAST_OUT=/tmp/tmp.H2oMOHRhOT ++++ mktemp +++ local LAST_ERR=/tmp/tmp.SrtUeDrr1B +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec -n monitoring-pmm3-8293 monitoring-pmm3-mongos-1 -c pmm-client -- pmm-admin status --json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.H2oMOHRhOT +++ cat /tmp/tmp.SrtUeDrr1B +++ rm /tmp/tmp.H2oMOHRhOT /tmp/tmp.SrtUeDrr1B +++ return 0 ++ for instance in $(kubectl_bin get pods --no-headers -l app.kubernetes.io/name=percona-server-mongodb --output=custom-columns='NAME:.metadata.name') ++ nodeList+=($(kubectl_bin exec -n "$namespace" $instance -c pmm-client -- pmm-admin status --json | jq -r '.pmm_agent_status.node_id')) +++ kubectl_bin exec -n monitoring-pmm3-8293 monitoring-pmm3-mongos-2 -c pmm-client -- pmm-admin status --json +++ jq -r .pmm_agent_status.node_id ++++ mktemp +++ local LAST_OUT=/tmp/tmp.mYDlvBOtx6 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.3N1J9Kd7iB +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec -n monitoring-pmm3-8293 monitoring-pmm3-mongos-2 -c pmm-client -- pmm-admin status --json +++ 
exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.mYDlvBOtx6 +++ cat /tmp/tmp.3N1J9Kd7iB +++ rm /tmp/tmp.mYDlvBOtx6 /tmp/tmp.3N1J9Kd7iB +++ return 0 ++ for instance in $(kubectl_bin get pods --no-headers -l app.kubernetes.io/name=percona-server-mongodb --output=custom-columns='NAME:.metadata.name') ++ nodeList+=($(kubectl_bin exec -n "$namespace" $instance -c pmm-client -- pmm-admin status --json | jq -r '.pmm_agent_status.node_id')) +++ kubectl_bin exec -n monitoring-pmm3-8293 monitoring-pmm3-rs0-0 -c pmm-client -- pmm-admin status --json +++ jq -r .pmm_agent_status.node_id ++++ mktemp +++ local LAST_OUT=/tmp/tmp.wkBlYXtCg4 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.dc9ZUsyI79 +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec -n monitoring-pmm3-8293 monitoring-pmm3-rs0-0 -c pmm-client -- pmm-admin status --json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.wkBlYXtCg4 +++ cat /tmp/tmp.dc9ZUsyI79 +++ rm /tmp/tmp.wkBlYXtCg4 /tmp/tmp.dc9ZUsyI79 +++ return 0 ++ for instance in $(kubectl_bin get pods --no-headers -l app.kubernetes.io/name=percona-server-mongodb --output=custom-columns='NAME:.metadata.name') ++ nodeList+=($(kubectl_bin exec -n "$namespace" $instance -c pmm-client -- pmm-admin status --json | jq -r '.pmm_agent_status.node_id')) +++ kubectl_bin exec -n monitoring-pmm3-8293 monitoring-pmm3-rs0-1 -c pmm-client -- pmm-admin status --json +++ jq -r .pmm_agent_status.node_id ++++ mktemp +++ local LAST_OUT=/tmp/tmp.Q4A1VTg34L ++++ mktemp +++ local LAST_ERR=/tmp/tmp.etptvohNXT +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec -n monitoring-pmm3-8293 monitoring-pmm3-rs0-1 -c pmm-client -- pmm-admin status --json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.Q4A1VTg34L +++ cat /tmp/tmp.etptvohNXT +++ rm /tmp/tmp.Q4A1VTg34L /tmp/tmp.etptvohNXT +++ return 0 ++ for instance in $(kubectl_bin get pods --no-headers -l app.kubernetes.io/name=percona-server-mongodb --output=custom-columns='NAME:.metadata.name') ++ nodeList+=($(kubectl_bin exec -n "$namespace" $instance -c pmm-client -- pmm-admin status --json | jq -r '.pmm_agent_status.node_id')) +++ kubectl_bin exec -n monitoring-pmm3-8293 monitoring-pmm3-rs0-2 -c pmm-client -- pmm-admin status --json +++ jq -r .pmm_agent_status.node_id ++++ mktemp +++ local LAST_OUT=/tmp/tmp.jptFVx4Ee1 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.ncEkEChHn9 +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec -n monitoring-pmm3-8293 monitoring-pmm3-rs0-2 -c pmm-client -- pmm-admin status --json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.jptFVx4Ee1 +++ cat /tmp/tmp.ncEkEChHn9 +++ rm /tmp/tmp.jptFVx4Ee1 /tmp/tmp.ncEkEChHn9 +++ return 0 ++ echo a4816d52-69fc-4ca5-9512-f3aba309817f 2119bda7-5ac4-4c82-82c8-5f28e3d6363d e19a1267-0996-4648-8f85-6d0a4c5dab52 0057a07a-542d-4909-bf46-96fc07ebd94e 67c2cee6-b266-4c92-9ec5-3652e48cb708 8a5d5ef5-605d-48e3-b7bf-781301542e7d 93b57028-5902-43a3-afc2-ed5e72ce1d67 90bd3f9a-f477-423a-b7ff-fd5c15b29d14 3e7ec5ac-6ceb-4497-a6f9-18b53dbbe423 + nodeList_from_pmm=($(does_node_id_exists "${nodeList[@]}")) ++ does_node_id_exists a4816d52-69fc-4ca5-9512-f3aba309817f 2119bda7-5ac4-4c82-82c8-5f28e3d6363d e19a1267-0996-4648-8f85-6d0a4c5dab52 0057a07a-542d-4909-bf46-96fc07ebd94e 67c2cee6-b266-4c92-9ec5-3652e48cb708 
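
# get_node_id_from_pmm above walks every percona-server-mongodb pod and reads the node ID its
# pmm-agent registered with, via `pmm-admin status --json`. A rough sketch; collect_node_ids is a
# hypothetical name and the namespace is the one used in this run.
collect_node_ids() {
  local namespace=$1
  local -a node_ids=()
  local pod
  for pod in $(kubectl get pods -n "$namespace" --no-headers \
      -l app.kubernetes.io/name=percona-server-mongodb \
      -o custom-columns=NAME:.metadata.name); do
    # the pmm-client sidecar reports the node_id assigned by the PMM server
    node_ids+=("$(kubectl exec -n "$namespace" "$pod" -c pmm-client -- \
      pmm-admin status --json | jq -r '.pmm_agent_status.node_id')")
  done
  echo "${node_ids[@]}"
}
# e.g. collect_node_ids monitoring-pmm3-8293
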
8a5d5ef5-605d-48e3-b7bf-781301542e7d 93b57028-5902-43a3-afc2-ed5e72ce1d67 90bd3f9a-f477-423a-b7ff-fd5c15b29d14 3e7ec5ac-6ceb-4497-a6f9-18b53dbbe423 ++ nodeList=('a4816d52-69fc-4ca5-9512-f3aba309817f' '2119bda7-5ac4-4c82-82c8-5f28e3d6363d' 'e19a1267-0996-4648-8f85-6d0a4c5dab52' '0057a07a-542d-4909-bf46-96fc07ebd94e' '67c2cee6-b266-4c92-9ec5-3652e48cb708' '8a5d5ef5-605d-48e3-b7bf-781301542e7d' '93b57028-5902-43a3-afc2-ed5e72ce1d67' '90bd3f9a-f477-423a-b7ff-fd5c15b29d14' '3e7ec5ac-6ceb-4497-a6f9-18b53dbbe423') ++ local -a nodeList ++ nodeList_from_pmm=() ++ local -a nodeList_from_pmm ++ for node_id in "${nodeList[@]}" ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-server-0 -- pmm-admin --server-url=https://admin:admin@$(get_pmm_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ grep a4816d52-69fc-4ca5-9512-f3aba309817f +++ awk '{print $4}' ++++ get_pmm_service_ip monitoring-service ++++ local service=monitoring-service ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.spec.type}' ++++ grep -q NotFound ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[]}' ++++ egrep -q 'hostname|ip' egrep: warning: egrep is obsolescent; using grep -E ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.2G00qLC6uh +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.JvAs3FTvsd ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.2G00qLC6uh ++++ cat /tmp/tmp.JvAs3FTvsd ++++ rm /tmp/tmp.2G00qLC6uh /tmp/tmp.JvAs3FTvsd ++++ return 0 ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.m8PHImbw9D +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.U6B6cW1Djq ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.m8PHImbw9D ++++ cat /tmp/tmp.U6B6cW1Djq ++++ rm /tmp/tmp.m8PHImbw9D /tmp/tmp.U6B6cW1Djq ++++ return 0 +++ kubectl_bin exec -n monitoring-pmm3-8293 monitoring-server-0 -- pmm-admin --server-url=https://admin:admin@35.226.246.51/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.WRnvm4AmLN ++++ mktemp +++ local LAST_ERR=/tmp/tmp.SzwEmak0B7 +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec -n monitoring-pmm3-8293 monitoring-server-0 -- pmm-admin --server-url=https://admin:admin@35.226.246.51/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.WRnvm4AmLN +++ cat /tmp/tmp.SzwEmak0B7 command terminated with exit code 1 +++ sleep 0 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec -n monitoring-pmm3-8293 monitoring-server-0 -- pmm-admin --server-url=https://admin:admin@35.226.246.51/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat 
/tmp/tmp.WRnvm4AmLN +++ cat /tmp/tmp.SzwEmak0B7 command terminated with exit code 1 +++ sleep 4 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec -n monitoring-pmm3-8293 monitoring-server-0 -- pmm-admin --server-url=https://admin:admin@35.226.246.51/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.WRnvm4AmLN +++ cat /tmp/tmp.SzwEmak0B7 command terminated with exit code 1 +++ sleep 8 +++ cat /tmp/tmp.WRnvm4AmLN +++ cat /tmp/tmp.SzwEmak0B7 command terminated with exit code 1 +++ rm /tmp/tmp.WRnvm4AmLN /tmp/tmp.SzwEmak0B7 +++ return 1 ++ for node_id in "${nodeList[@]}" ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-server-0 -- pmm-admin --server-url=https://admin:admin@$(get_pmm_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ grep 2119bda7-5ac4-4c82-82c8-5f28e3d6363d +++ awk '{print $4}' ++++ get_pmm_service_ip monitoring-service ++++ local service=monitoring-service ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.spec.type}' ++++ grep -q NotFound ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[]}' ++++ egrep -q 'hostname|ip' egrep: warning: egrep is obsolescent; using grep -E ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.yyVh5yODf0 +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.Mv7dXaWAZx ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.yyVh5yODf0 ++++ cat /tmp/tmp.Mv7dXaWAZx ++++ rm /tmp/tmp.yyVh5yODf0 /tmp/tmp.Mv7dXaWAZx ++++ return 0 ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.sMMaZ32lb5 +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.H1EXTXWeGt ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.sMMaZ32lb5 ++++ cat /tmp/tmp.H1EXTXWeGt ++++ rm /tmp/tmp.sMMaZ32lb5 /tmp/tmp.H1EXTXWeGt ++++ return 0 +++ kubectl_bin exec -n monitoring-pmm3-8293 monitoring-server-0 -- pmm-admin --server-url=https://admin:admin@35.226.246.51/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.EDv0U2xCxF ++++ mktemp +++ local LAST_ERR=/tmp/tmp.8H242DdEAv +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec -n monitoring-pmm3-8293 monitoring-server-0 -- pmm-admin --server-url=https://admin:admin@35.226.246.51/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.EDv0U2xCxF +++ cat /tmp/tmp.8H242DdEAv command terminated with exit code 1 +++ sleep 0 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec -n monitoring-pmm3-8293 monitoring-server-0 -- pmm-admin --server-url=https://admin:admin@35.226.246.51/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e 
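
# does_node_id_exists above cross-checks each collected node ID against the server-side inventory by
# exec-ing into monitoring-server-0 and listing CONTAINER_NODE entries with pmm-admin; in this run
# that in-container call keeps terminating with exit code 1 and is retried by the kubectl_bin
# wrapper (sleep 0/4/8). A condensed sketch of the lookup; node_registered_in_pmm is a hypothetical
# name, and the namespace, pod and endpoint values come from this run.
node_registered_in_pmm() {
  local namespace=$1 endpoint=$2 node_id=$3
  # prints column 4 of the matching inventory row, as the original helper does
  kubectl exec -n "$namespace" monitoring-server-0 -- \
    pmm-admin --server-url="https://admin:admin@${endpoint}/" --server-insecure-tls \
    inventory list nodes --node-type=CONTAINER_NODE \
    | grep "$node_id" | awk '{print $4}'
}
# e.g. node_registered_in_pmm monitoring-pmm3-8293 35.226.246.51 a4816d52-69fc-4ca5-9512-f3aba309817f
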
+++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.EDv0U2xCxF +++ cat /tmp/tmp.8H242DdEAv command terminated with exit code 1 +++ sleep 4 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec -n monitoring-pmm3-8293 monitoring-server-0 -- pmm-admin --server-url=https://admin:admin@35.226.246.51/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.EDv0U2xCxF +++ cat /tmp/tmp.8H242DdEAv command terminated with exit code 1 +++ sleep 8 +++ cat /tmp/tmp.EDv0U2xCxF +++ cat /tmp/tmp.8H242DdEAv command terminated with exit code 1 +++ rm /tmp/tmp.EDv0U2xCxF /tmp/tmp.8H242DdEAv +++ return 1 ++ for node_id in "${nodeList[@]}" ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-server-0 -- pmm-admin --server-url=https://admin:admin@$(get_pmm_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ grep e19a1267-0996-4648-8f85-6d0a4c5dab52 +++ awk '{print $4}' ++++ get_pmm_service_ip monitoring-service ++++ local service=monitoring-service ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.spec.type}' ++++ grep -q NotFound ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[]}' ++++ egrep -q 'hostname|ip' egrep: warning: egrep is obsolescent; using grep -E ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.1BnpcNMzza +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.6umvavs4eX ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.1BnpcNMzza ++++ cat /tmp/tmp.6umvavs4eX ++++ rm /tmp/tmp.1BnpcNMzza /tmp/tmp.6umvavs4eX ++++ return 0 ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.MulLuH7Sm0 +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.29RqG8zxlL ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.MulLuH7Sm0 ++++ cat /tmp/tmp.29RqG8zxlL ++++ rm /tmp/tmp.MulLuH7Sm0 /tmp/tmp.29RqG8zxlL ++++ return 0 +++ kubectl_bin exec -n monitoring-pmm3-8293 monitoring-server-0 -- pmm-admin --server-url=https://admin:admin@35.226.246.51/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.GmwoHrvLJE ++++ mktemp +++ local LAST_ERR=/tmp/tmp.ev7SjspTag +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec -n monitoring-pmm3-8293 monitoring-server-0 -- pmm-admin --server-url=https://admin:admin@35.226.246.51/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.GmwoHrvLJE +++ cat /tmp/tmp.ev7SjspTag command terminated with exit code 1 +++ sleep 0 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec -n monitoring-pmm3-8293 monitoring-server-0 -- pmm-admin --server-url=https://admin:admin@35.226.246.51/ --server-insecure-tls inventory list nodes 
--node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.GmwoHrvLJE +++ cat /tmp/tmp.ev7SjspTag command terminated with exit code 1 +++ sleep 4 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec -n monitoring-pmm3-8293 monitoring-server-0 -- pmm-admin --server-url=https://admin:admin@35.226.246.51/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.GmwoHrvLJE +++ cat /tmp/tmp.ev7SjspTag command terminated with exit code 1 +++ sleep 8 +++ cat /tmp/tmp.GmwoHrvLJE +++ cat /tmp/tmp.ev7SjspTag command terminated with exit code 1 +++ rm /tmp/tmp.GmwoHrvLJE /tmp/tmp.ev7SjspTag +++ return 1 ++ for node_id in "${nodeList[@]}" ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-server-0 -- pmm-admin --server-url=https://admin:admin@$(get_pmm_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ grep 0057a07a-542d-4909-bf46-96fc07ebd94e +++ awk '{print $4}' ++++ get_pmm_service_ip monitoring-service ++++ local service=monitoring-service ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.spec.type}' ++++ grep -q NotFound ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[]}' ++++ egrep -q 'hostname|ip' egrep: warning: egrep is obsolescent; using grep -E ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.obgn582wez +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.m1LKj3Ci6r ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.obgn582wez ++++ cat /tmp/tmp.m1LKj3Ci6r ++++ rm /tmp/tmp.obgn582wez /tmp/tmp.m1LKj3Ci6r ++++ return 0 ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.1XCUbNzzVZ +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.eeMy80auwi ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.1XCUbNzzVZ ++++ cat /tmp/tmp.eeMy80auwi ++++ rm /tmp/tmp.1XCUbNzzVZ /tmp/tmp.eeMy80auwi ++++ return 0 +++ kubectl_bin exec -n monitoring-pmm3-8293 monitoring-server-0 -- pmm-admin --server-url=https://admin:admin@35.226.246.51/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.EaZ32j3b3X ++++ mktemp +++ local LAST_ERR=/tmp/tmp.vG8s43XFFs +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec -n monitoring-pmm3-8293 monitoring-server-0 -- pmm-admin --server-url=https://admin:admin@35.226.246.51/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.EaZ32j3b3X +++ cat /tmp/tmp.vG8s43XFFs command terminated with exit code 1 +++ sleep 0 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec -n monitoring-pmm3-8293 monitoring-server-0 -- pmm-admin --server-url=https://admin:admin@35.226.246.51/ 
--server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.EaZ32j3b3X +++ cat /tmp/tmp.vG8s43XFFs command terminated with exit code 1 +++ sleep 4 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec -n monitoring-pmm3-8293 monitoring-server-0 -- pmm-admin --server-url=https://admin:admin@35.226.246.51/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.EaZ32j3b3X +++ cat /tmp/tmp.vG8s43XFFs command terminated with exit code 1 +++ sleep 8 +++ cat /tmp/tmp.EaZ32j3b3X +++ cat /tmp/tmp.vG8s43XFFs command terminated with exit code 1 +++ rm /tmp/tmp.EaZ32j3b3X /tmp/tmp.vG8s43XFFs +++ return 1 ++ for node_id in "${nodeList[@]}" ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-server-0 -- pmm-admin --server-url=https://admin:admin@$(get_pmm_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ grep 67c2cee6-b266-4c92-9ec5-3652e48cb708 ++++ get_pmm_service_ip monitoring-service ++++ local service=monitoring-service +++ awk '{print $4}' ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.spec.type}' ++++ grep -q NotFound ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[]}' ++++ egrep -q 'hostname|ip' egrep: warning: egrep is obsolescent; using grep -E ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.tI8oU1Eh7l +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.99MMnHZ7PU ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.tI8oU1Eh7l ++++ cat /tmp/tmp.99MMnHZ7PU ++++ rm /tmp/tmp.tI8oU1Eh7l /tmp/tmp.99MMnHZ7PU ++++ return 0 ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.G2v3yrIJR0 +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.SjuaJy8m0E ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.G2v3yrIJR0 ++++ cat /tmp/tmp.SjuaJy8m0E ++++ rm /tmp/tmp.G2v3yrIJR0 /tmp/tmp.SjuaJy8m0E ++++ return 0 +++ kubectl_bin exec -n monitoring-pmm3-8293 monitoring-server-0 -- pmm-admin --server-url=https://admin:admin@35.226.246.51/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.Kmh8e3glOd ++++ mktemp +++ local LAST_ERR=/tmp/tmp.XGOrSFaQz6 +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec -n monitoring-pmm3-8293 monitoring-server-0 -- pmm-admin --server-url=https://admin:admin@35.226.246.51/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.Kmh8e3glOd +++ cat /tmp/tmp.XGOrSFaQz6 command terminated with exit code 1 +++ sleep 0 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec -n monitoring-pmm3-8293 monitoring-server-0 -- pmm-admin 
--server-url=https://admin:admin@35.226.246.51/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.Kmh8e3glOd +++ cat /tmp/tmp.XGOrSFaQz6 command terminated with exit code 1 +++ sleep 4 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec -n monitoring-pmm3-8293 monitoring-server-0 -- pmm-admin --server-url=https://admin:admin@35.226.246.51/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.Kmh8e3glOd +++ cat /tmp/tmp.XGOrSFaQz6 command terminated with exit code 1 +++ sleep 8 +++ cat /tmp/tmp.Kmh8e3glOd +++ cat /tmp/tmp.XGOrSFaQz6 command terminated with exit code 1 +++ rm /tmp/tmp.Kmh8e3glOd /tmp/tmp.XGOrSFaQz6 +++ return 1 ++ for node_id in "${nodeList[@]}" ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-server-0 -- pmm-admin --server-url=https://admin:admin@$(get_pmm_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ grep 8a5d5ef5-605d-48e3-b7bf-781301542e7d +++ awk '{print $4}' ++++ get_pmm_service_ip monitoring-service ++++ local service=monitoring-service ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.spec.type}' ++++ grep -q NotFound ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[]}' ++++ egrep -q 'hostname|ip' egrep: warning: egrep is obsolescent; using grep -E ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.Dhy4k0K4Zy +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.U9RwXIUIwq ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.Dhy4k0K4Zy ++++ cat /tmp/tmp.U9RwXIUIwq ++++ rm /tmp/tmp.Dhy4k0K4Zy /tmp/tmp.U9RwXIUIwq ++++ return 0 ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.IJlQN94sWP +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.CmTLj8PVex ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.IJlQN94sWP ++++ cat /tmp/tmp.CmTLj8PVex ++++ rm /tmp/tmp.IJlQN94sWP /tmp/tmp.CmTLj8PVex ++++ return 0 +++ kubectl_bin exec -n monitoring-pmm3-8293 monitoring-server-0 -- pmm-admin --server-url=https://admin:admin@35.226.246.51/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.dBEvqeIb0i ++++ mktemp +++ local LAST_ERR=/tmp/tmp.QTHjkgFQBn +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec -n monitoring-pmm3-8293 monitoring-server-0 -- pmm-admin --server-url=https://admin:admin@35.226.246.51/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.dBEvqeIb0i +++ cat /tmp/tmp.QTHjkgFQBn command terminated with exit code 1 +++ sleep 0 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec -n 
monitoring-pmm3-8293 monitoring-server-0 -- pmm-admin --server-url=https://admin:admin@35.226.246.51/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.dBEvqeIb0i +++ cat /tmp/tmp.QTHjkgFQBn command terminated with exit code 1 +++ sleep 4 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec -n monitoring-pmm3-8293 monitoring-server-0 -- pmm-admin --server-url=https://admin:admin@35.226.246.51/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.dBEvqeIb0i +++ cat /tmp/tmp.QTHjkgFQBn command terminated with exit code 1 +++ sleep 8 +++ cat /tmp/tmp.dBEvqeIb0i +++ cat /tmp/tmp.QTHjkgFQBn command terminated with exit code 1 +++ rm /tmp/tmp.dBEvqeIb0i /tmp/tmp.QTHjkgFQBn +++ return 1 ++ for node_id in "${nodeList[@]}" ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-server-0 -- pmm-admin --server-url=https://admin:admin@$(get_pmm_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ grep 93b57028-5902-43a3-afc2-ed5e72ce1d67 +++ awk '{print $4}' ++++ get_pmm_service_ip monitoring-service ++++ local service=monitoring-service ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.spec.type}' ++++ grep -q NotFound ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[]}' ++++ egrep -q 'hostname|ip' egrep: warning: egrep is obsolescent; using grep -E ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.tvMgh1H42q +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.tSxRSwS80r ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.tvMgh1H42q ++++ cat /tmp/tmp.tSxRSwS80r ++++ rm /tmp/tmp.tvMgh1H42q /tmp/tmp.tSxRSwS80r ++++ return 0 ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.SSpwalGQKj +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.qnZjIq2L2n ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.SSpwalGQKj ++++ cat /tmp/tmp.qnZjIq2L2n ++++ rm /tmp/tmp.SSpwalGQKj /tmp/tmp.qnZjIq2L2n ++++ return 0 +++ kubectl_bin exec -n monitoring-pmm3-8293 monitoring-server-0 -- pmm-admin --server-url=https://admin:admin@35.226.246.51/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.5MzYjgPJg8 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.1stsIRVlOQ +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec -n monitoring-pmm3-8293 monitoring-server-0 -- pmm-admin --server-url=https://admin:admin@35.226.246.51/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.5MzYjgPJg8 +++ cat /tmp/tmp.1stsIRVlOQ command terminated with exit code 1 +++ sleep 0 +++ for i in $(seq 
0 2) +++ set +e +++ kubectl exec -n monitoring-pmm3-8293 monitoring-server-0 -- pmm-admin --server-url=https://admin:admin@35.226.246.51/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.5MzYjgPJg8 +++ cat /tmp/tmp.1stsIRVlOQ command terminated with exit code 1 +++ sleep 4 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec -n monitoring-pmm3-8293 monitoring-server-0 -- pmm-admin --server-url=https://admin:admin@35.226.246.51/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.5MzYjgPJg8 +++ cat /tmp/tmp.1stsIRVlOQ command terminated with exit code 1 +++ sleep 8 +++ cat /tmp/tmp.5MzYjgPJg8 +++ cat /tmp/tmp.1stsIRVlOQ command terminated with exit code 1 +++ rm /tmp/tmp.5MzYjgPJg8 /tmp/tmp.1stsIRVlOQ +++ return 1 ++ for node_id in "${nodeList[@]}" ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-server-0 -- pmm-admin --server-url=https://admin:admin@$(get_pmm_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ grep 90bd3f9a-f477-423a-b7ff-fd5c15b29d14 +++ awk '{print $4}' ++++ get_pmm_service_ip monitoring-service ++++ local service=monitoring-service ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.spec.type}' ++++ grep -q NotFound ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[]}' ++++ egrep -q 'hostname|ip' egrep: warning: egrep is obsolescent; using grep -E ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.9DUqbBzzKL +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.nmH5lMh2Du ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.9DUqbBzzKL ++++ cat /tmp/tmp.nmH5lMh2Du ++++ rm /tmp/tmp.9DUqbBzzKL /tmp/tmp.nmH5lMh2Du ++++ return 0 ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.KAg7dyzf7J +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.E6wNAZdI35 ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.KAg7dyzf7J ++++ cat /tmp/tmp.E6wNAZdI35 ++++ rm /tmp/tmp.KAg7dyzf7J /tmp/tmp.E6wNAZdI35 ++++ return 0 +++ kubectl_bin exec -n monitoring-pmm3-8293 monitoring-server-0 -- pmm-admin --server-url=https://admin:admin@35.226.246.51/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.Z59rmUfP1q ++++ mktemp +++ local LAST_ERR=/tmp/tmp.37Ynt6ucG5 +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec -n monitoring-pmm3-8293 monitoring-server-0 -- pmm-admin --server-url=https://admin:admin@35.226.246.51/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.Z59rmUfP1q +++ cat /tmp/tmp.37Ynt6ucG5 command terminated with exit 
code 1 +++ sleep 0 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec -n monitoring-pmm3-8293 monitoring-server-0 -- pmm-admin --server-url=https://admin:admin@35.226.246.51/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.Z59rmUfP1q +++ cat /tmp/tmp.37Ynt6ucG5 command terminated with exit code 1 +++ sleep 4 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec -n monitoring-pmm3-8293 monitoring-server-0 -- pmm-admin --server-url=https://admin:admin@35.226.246.51/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.Z59rmUfP1q +++ cat /tmp/tmp.37Ynt6ucG5 command terminated with exit code 1 +++ sleep 8 +++ cat /tmp/tmp.Z59rmUfP1q +++ cat /tmp/tmp.37Ynt6ucG5 command terminated with exit code 1 +++ rm /tmp/tmp.Z59rmUfP1q /tmp/tmp.37Ynt6ucG5 +++ return 1 ++ for node_id in "${nodeList[@]}" ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-server-0 -- pmm-admin --server-url=https://admin:admin@$(get_pmm_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ grep 3e7ec5ac-6ceb-4497-a6f9-18b53dbbe423 +++ awk '{print $4}' ++++ get_pmm_service_ip monitoring-service ++++ local service=monitoring-service ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.spec.type}' ++++ grep -q NotFound ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[]}' ++++ egrep -q 'hostname|ip' egrep: warning: egrep is obsolescent; using grep -E ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.5eAGZqI94b +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.5PaQzZGuG2 ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.5eAGZqI94b ++++ cat /tmp/tmp.5PaQzZGuG2 ++++ rm /tmp/tmp.5eAGZqI94b /tmp/tmp.5PaQzZGuG2 ++++ return 0 ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.G67bq9WOwv +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.6DNYUmIREQ ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.G67bq9WOwv ++++ cat /tmp/tmp.6DNYUmIREQ ++++ rm /tmp/tmp.G67bq9WOwv /tmp/tmp.6DNYUmIREQ ++++ return 0 +++ kubectl_bin exec -n monitoring-pmm3-8293 monitoring-server-0 -- pmm-admin --server-url=https://admin:admin@35.226.246.51/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.AEF0JBQUKM ++++ mktemp +++ local LAST_ERR=/tmp/tmp.TD39xmsHTt +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec -n monitoring-pmm3-8293 monitoring-server-0 -- pmm-admin --server-url=https://admin:admin@35.226.246.51/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.AEF0JBQUKM +++ cat 
/tmp/tmp.TD39xmsHTt command terminated with exit code 1 +++ sleep 0 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec -n monitoring-pmm3-8293 monitoring-server-0 -- pmm-admin --server-url=https://admin:admin@35.226.246.51/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.AEF0JBQUKM +++ cat /tmp/tmp.TD39xmsHTt command terminated with exit code 1 +++ sleep 4 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec -n monitoring-pmm3-8293 monitoring-server-0 -- pmm-admin --server-url=https://admin:admin@35.226.246.51/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.AEF0JBQUKM +++ cat /tmp/tmp.TD39xmsHTt command terminated with exit code 1 +++ sleep 8 +++ cat /tmp/tmp.AEF0JBQUKM +++ cat /tmp/tmp.TD39xmsHTt command terminated with exit code 1 +++ rm /tmp/tmp.AEF0JBQUKM /tmp/tmp.TD39xmsHTt +++ return 1 ++ echo + kubectl_bin patch psmdb monitoring-pmm3 --type json '-p=[{"op":"add","path":"/spec/pause","value":true}]' ++ mktemp + local LAST_OUT=/tmp/tmp.eHKsVqF7cZ ++ mktemp + local LAST_ERR=/tmp/tmp.dktqejFQuA + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl patch psmdb monitoring-pmm3 --type json '-p=[{"op":"add","path":"/spec/pause","value":true}]' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.eHKsVqF7cZ perconaservermongodb.psmdb.percona.com/monitoring-pmm3 patched + cat /tmp/tmp.dktqejFQuA + rm /tmp/tmp.eHKsVqF7cZ /tmp/tmp.dktqejFQuA + return 0 + wait_for_delete pod/monitoring-pmm3-mongos-0 + local res=pod/monitoring-pmm3-mongos-0 + local wait_time=60 + set +o xtrace waiting for pod/monitoring-pmm3-mongos-0 to be deleted.........................Error from server (NotFound): pods "monitoring-pmm3-mongos-0" not found Error from server (NotFound): pods "monitoring-pmm3-mongos-0" not found Error from server (NotFound): pods "monitoring-pmm3-mongos-0" not found Error from server (NotFound): pods "monitoring-pmm3-mongos-0" not found + wait_for_delete pod/monitoring-pmm3-rs0-0 + local res=pod/monitoring-pmm3-rs0-0 + local wait_time=60 + set +o xtrace waiting for pod/monitoring-pmm3-rs0-0 to be deleted.........Error from server (NotFound): pods "monitoring-pmm3-rs0-0" not found Error from server (NotFound): pods "monitoring-pmm3-rs0-0" not found Error from server (NotFound): pods "monitoring-pmm3-rs0-0" not found Error from server (NotFound): pods "monitoring-pmm3-rs0-0" not found + wait_for_delete pod/monitoring-pmm3-cfg-0 + local res=pod/monitoring-pmm3-cfg-0 + local wait_time=60 + set +o xtrace waiting for pod/monitoring-pmm3-cfg-0 to be deleted......Error from server (NotFound): pods "monitoring-pmm3-cfg-0" not found Error from server (NotFound): pods "monitoring-pmm3-cfg-0" not found Error from server (NotFound): pods "monitoring-pmm3-cfg-0" not found Error from server (NotFound): pods "monitoring-pmm3-cfg-0" not found + desc 'check if services are not deleted' + set +o xtrace ----------------------------------------------------------------------------------- check if services are not deleted ----------------------------------------------------------------------------------- + kubectl_bin get svc monitoring-pmm3-rs0 ++ mktemp + local LAST_OUT=/tmp/tmp.csXYJWGOxU ++ mktemp + local LAST_ERR=/tmp/tmp.ZRDBY5MePu + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get svc monitoring-pmm3-rs0 + 
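For readers following the trace: the block above pauses the cluster by adding spec.pause=true to the monitoring-pmm3 custom resource and then waits for the mongos, rs0 and cfg pods to be deleted before checking that their services survive. A condensed sketch of that step, reconstructed from the trace (the real wait_for_delete helper lives in the test function library and its timeout handling may differ):

    # Pause the cluster; the operator removes the pods but keeps the services.
    kubectl patch psmdb monitoring-pmm3 --type=json \
        -p='[{"op":"add","path":"/spec/pause","value":true}]'

    # Approximation of wait_for_delete as it appears in the trace: poll until
    # kubectl reports the resource as gone or the retry budget runs out.
    wait_for_delete() {
        local res="$1"
        local wait_time="${2:-60}"
        local retry=0
        echo -n "waiting for ${res} to be deleted"
        while kubectl get "${res}" >/dev/null 2>&1; do
            if [ "$((retry++))" -ge "${wait_time}" ]; then
                echo "timeout waiting for ${res}" >&2
                return 1
            fi
            echo -n .
            sleep 1
        done
        echo
    }

    wait_for_delete pod/monitoring-pmm3-mongos-0
    wait_for_delete pod/monitoring-pmm3-rs0-0
    wait_for_delete pod/monitoring-pmm3-cfg-0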
exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.csXYJWGOxU NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE monitoring-pmm3-rs0 ClusterIP None 27019/TCP 17m + cat /tmp/tmp.ZRDBY5MePu + rm /tmp/tmp.csXYJWGOxU /tmp/tmp.ZRDBY5MePu + return 0 + kubectl_bin get svc monitoring-pmm3-cfg ++ mktemp + local LAST_OUT=/tmp/tmp.8owpNfGPzy ++ mktemp + local LAST_ERR=/tmp/tmp.WD9xFYsyUN + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get svc monitoring-pmm3-cfg + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.8owpNfGPzy NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE monitoring-pmm3-cfg ClusterIP None 27019/TCP 17m + cat /tmp/tmp.WD9xFYsyUN + rm /tmp/tmp.8owpNfGPzy /tmp/tmp.WD9xFYsyUN + return 0 + kubectl_bin get svc monitoring-pmm3-mongos ++ mktemp + local LAST_OUT=/tmp/tmp.DVhq0WIYzY ++ mktemp + local LAST_ERR=/tmp/tmp.p4kwfovZ6p + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get svc monitoring-pmm3-mongos + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.DVhq0WIYzY NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE monitoring-pmm3-mongos ClusterIP 34.118.239.81 27019/TCP 17m + cat /tmp/tmp.p4kwfovZ6p + rm /tmp/tmp.DVhq0WIYzY /tmp/tmp.p4kwfovZ6p + return 0 + does_node_id_exists_in_pmm=($(does_node_id_exists "${nodeList[@]}")) ++ does_node_id_exists a4816d52-69fc-4ca5-9512-f3aba309817f 2119bda7-5ac4-4c82-82c8-5f28e3d6363d e19a1267-0996-4648-8f85-6d0a4c5dab52 0057a07a-542d-4909-bf46-96fc07ebd94e 67c2cee6-b266-4c92-9ec5-3652e48cb708 8a5d5ef5-605d-48e3-b7bf-781301542e7d 93b57028-5902-43a3-afc2-ed5e72ce1d67 90bd3f9a-f477-423a-b7ff-fd5c15b29d14 3e7ec5ac-6ceb-4497-a6f9-18b53dbbe423 ++ nodeList=('a4816d52-69fc-4ca5-9512-f3aba309817f' '2119bda7-5ac4-4c82-82c8-5f28e3d6363d' 'e19a1267-0996-4648-8f85-6d0a4c5dab52' '0057a07a-542d-4909-bf46-96fc07ebd94e' '67c2cee6-b266-4c92-9ec5-3652e48cb708' '8a5d5ef5-605d-48e3-b7bf-781301542e7d' '93b57028-5902-43a3-afc2-ed5e72ce1d67' '90bd3f9a-f477-423a-b7ff-fd5c15b29d14' '3e7ec5ac-6ceb-4497-a6f9-18b53dbbe423') ++ local -a nodeList ++ nodeList_from_pmm=() ++ local -a nodeList_from_pmm ++ for node_id in "${nodeList[@]}" ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-server-0 -- pmm-admin --server-url=https://admin:admin@$(get_pmm_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ grep a4816d52-69fc-4ca5-9512-f3aba309817f +++ awk '{print $4}' ++++ get_pmm_service_ip monitoring-service ++++ local service=monitoring-service ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.spec.type}' ++++ grep -q NotFound ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[]}' ++++ egrep -q 'hostname|ip' egrep: warning: egrep is obsolescent; using grep -E ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.lEQ9MBtUfm +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.asVWAEoFCC ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.lEQ9MBtUfm ++++ cat /tmp/tmp.asVWAEoFCC ++++ rm /tmp/tmp.lEQ9MBtUfm /tmp/tmp.asVWAEoFCC ++++ return 0 ++++ kubectl_bin get service/monitoring-service -o 
'jsonpath={.status.loadBalancer.ingress[].hostname}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.iURV5EhjCB +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.bsfrSXx9H0 ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.iURV5EhjCB ++++ cat /tmp/tmp.bsfrSXx9H0 ++++ rm /tmp/tmp.iURV5EhjCB /tmp/tmp.bsfrSXx9H0 ++++ return 0 +++ kubectl_bin exec -n monitoring-pmm3-8293 monitoring-server-0 -- pmm-admin --server-url=https://admin:admin@35.226.246.51/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.3N7wl6SOL6 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.Bb4yguMWvE +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec -n monitoring-pmm3-8293 monitoring-server-0 -- pmm-admin --server-url=https://admin:admin@35.226.246.51/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.3N7wl6SOL6 +++ cat /tmp/tmp.Bb4yguMWvE command terminated with exit code 1 +++ sleep 0 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec -n monitoring-pmm3-8293 monitoring-server-0 -- pmm-admin --server-url=https://admin:admin@35.226.246.51/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.3N7wl6SOL6 +++ cat /tmp/tmp.Bb4yguMWvE command terminated with exit code 1 +++ sleep 4 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec -n monitoring-pmm3-8293 monitoring-server-0 -- pmm-admin --server-url=https://admin:admin@35.226.246.51/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.3N7wl6SOL6 +++ cat /tmp/tmp.Bb4yguMWvE command terminated with exit code 1 +++ sleep 8 +++ cat /tmp/tmp.3N7wl6SOL6 +++ cat /tmp/tmp.Bb4yguMWvE command terminated with exit code 1 +++ rm /tmp/tmp.3N7wl6SOL6 /tmp/tmp.Bb4yguMWvE +++ return 1 ++ for node_id in "${nodeList[@]}" ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-server-0 -- pmm-admin --server-url=https://admin:admin@$(get_pmm_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ grep 2119bda7-5ac4-4c82-82c8-5f28e3d6363d +++ awk '{print $4}' ++++ get_pmm_service_ip monitoring-service ++++ local service=monitoring-service ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.spec.type}' ++++ grep -q NotFound ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[]}' ++++ egrep -q 'hostname|ip' egrep: warning: egrep is obsolescent; using grep -E ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.jg2NmQQgmr +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.1g4oIveWTw ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.jg2NmQQgmr ++++ cat /tmp/tmp.1g4oIveWTw ++++ rm /tmp/tmp.jg2NmQQgmr /tmp/tmp.1g4oIveWTw ++++ return 0 ++++ 
kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.7qmeogjy4D +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.y6AYOfJuHJ ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.7qmeogjy4D ++++ cat /tmp/tmp.y6AYOfJuHJ ++++ rm /tmp/tmp.7qmeogjy4D /tmp/tmp.y6AYOfJuHJ ++++ return 0 +++ kubectl_bin exec -n monitoring-pmm3-8293 monitoring-server-0 -- pmm-admin --server-url=https://admin:admin@35.226.246.51/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.RfjBhxYIoY ++++ mktemp +++ local LAST_ERR=/tmp/tmp.MzshTkwBBh +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec -n monitoring-pmm3-8293 monitoring-server-0 -- pmm-admin --server-url=https://admin:admin@35.226.246.51/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.RfjBhxYIoY +++ cat /tmp/tmp.MzshTkwBBh command terminated with exit code 1 +++ sleep 0 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec -n monitoring-pmm3-8293 monitoring-server-0 -- pmm-admin --server-url=https://admin:admin@35.226.246.51/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.RfjBhxYIoY +++ cat /tmp/tmp.MzshTkwBBh command terminated with exit code 1 +++ sleep 4 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec -n monitoring-pmm3-8293 monitoring-server-0 -- pmm-admin --server-url=https://admin:admin@35.226.246.51/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.RfjBhxYIoY +++ cat /tmp/tmp.MzshTkwBBh command terminated with exit code 1 +++ sleep 8 +++ cat /tmp/tmp.RfjBhxYIoY +++ cat /tmp/tmp.MzshTkwBBh command terminated with exit code 1 +++ rm /tmp/tmp.RfjBhxYIoY /tmp/tmp.MzshTkwBBh +++ return 1 ++ for node_id in "${nodeList[@]}" ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-server-0 -- pmm-admin --server-url=https://admin:admin@$(get_pmm_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ grep e19a1267-0996-4648-8f85-6d0a4c5dab52 +++ awk '{print $4}' ++++ get_pmm_service_ip monitoring-service ++++ local service=monitoring-service ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.spec.type}' ++++ grep -q NotFound ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[]}' ++++ egrep -q 'hostname|ip' egrep: warning: egrep is obsolescent; using grep -E ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.Gs59PDZA6I +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.iStpjiJK4Y ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.Gs59PDZA6I ++++ cat /tmp/tmp.iStpjiJK4Y ++++ rm /tmp/tmp.Gs59PDZA6I 
/tmp/tmp.iStpjiJK4Y ++++ return 0 ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.3X36TExXe0 +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.5UlAtTWzyq ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.3X36TExXe0 ++++ cat /tmp/tmp.5UlAtTWzyq ++++ rm /tmp/tmp.3X36TExXe0 /tmp/tmp.5UlAtTWzyq ++++ return 0 +++ kubectl_bin exec -n monitoring-pmm3-8293 monitoring-server-0 -- pmm-admin --server-url=https://admin:admin@35.226.246.51/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.ELYM4dsfs2 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.xwlhtVT8E5 +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec -n monitoring-pmm3-8293 monitoring-server-0 -- pmm-admin --server-url=https://admin:admin@35.226.246.51/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.ELYM4dsfs2 +++ cat /tmp/tmp.xwlhtVT8E5 command terminated with exit code 1 +++ sleep 0 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec -n monitoring-pmm3-8293 monitoring-server-0 -- pmm-admin --server-url=https://admin:admin@35.226.246.51/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.ELYM4dsfs2 +++ cat /tmp/tmp.xwlhtVT8E5 command terminated with exit code 1 +++ sleep 4 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec -n monitoring-pmm3-8293 monitoring-server-0 -- pmm-admin --server-url=https://admin:admin@35.226.246.51/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.ELYM4dsfs2 +++ cat /tmp/tmp.xwlhtVT8E5 command terminated with exit code 1 +++ sleep 8 +++ cat /tmp/tmp.ELYM4dsfs2 +++ cat /tmp/tmp.xwlhtVT8E5 command terminated with exit code 1 +++ rm /tmp/tmp.ELYM4dsfs2 /tmp/tmp.xwlhtVT8E5 +++ return 1 ++ for node_id in "${nodeList[@]}" ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-server-0 -- pmm-admin --server-url=https://admin:admin@$(get_pmm_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ grep 0057a07a-542d-4909-bf46-96fc07ebd94e +++ awk '{print $4}' ++++ get_pmm_service_ip monitoring-service ++++ local service=monitoring-service ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.spec.type}' ++++ grep -q NotFound ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[]}' ++++ egrep -q 'hostname|ip' egrep: warning: egrep is obsolescent; using grep -E ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.fS6uH4E9qO +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.haJLXwtQDc ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.fS6uH4E9qO ++++ cat 
/tmp/tmp.haJLXwtQDc ++++ rm /tmp/tmp.fS6uH4E9qO /tmp/tmp.haJLXwtQDc ++++ return 0 ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.SnS9swNZyK +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.u2PX4hb3T0 ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.SnS9swNZyK ++++ cat /tmp/tmp.u2PX4hb3T0 ++++ rm /tmp/tmp.SnS9swNZyK /tmp/tmp.u2PX4hb3T0 ++++ return 0 +++ kubectl_bin exec -n monitoring-pmm3-8293 monitoring-server-0 -- pmm-admin --server-url=https://admin:admin@35.226.246.51/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.mVWEgoSQiU ++++ mktemp +++ local LAST_ERR=/tmp/tmp.FvmrWtVicJ +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec -n monitoring-pmm3-8293 monitoring-server-0 -- pmm-admin --server-url=https://admin:admin@35.226.246.51/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.mVWEgoSQiU +++ cat /tmp/tmp.FvmrWtVicJ command terminated with exit code 1 +++ sleep 0 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec -n monitoring-pmm3-8293 monitoring-server-0 -- pmm-admin --server-url=https://admin:admin@35.226.246.51/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.mVWEgoSQiU +++ cat /tmp/tmp.FvmrWtVicJ command terminated with exit code 1 +++ sleep 4 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec -n monitoring-pmm3-8293 monitoring-server-0 -- pmm-admin --server-url=https://admin:admin@35.226.246.51/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.mVWEgoSQiU +++ cat /tmp/tmp.FvmrWtVicJ command terminated with exit code 1 +++ sleep 8 +++ cat /tmp/tmp.mVWEgoSQiU +++ cat /tmp/tmp.FvmrWtVicJ command terminated with exit code 1 +++ rm /tmp/tmp.mVWEgoSQiU /tmp/tmp.FvmrWtVicJ +++ return 1 ++ for node_id in "${nodeList[@]}" ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-server-0 -- pmm-admin --server-url=https://admin:admin@$(get_pmm_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ grep 67c2cee6-b266-4c92-9ec5-3652e48cb708 +++ awk '{print $4}' ++++ get_pmm_service_ip monitoring-service ++++ local service=monitoring-service ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.spec.type}' ++++ grep -q NotFound ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[]}' ++++ egrep -q 'hostname|ip' egrep: warning: egrep is obsolescent; using grep -E ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.xCYmAYgvXG +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.KQAqTrFttF ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break 
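The service lookups repeated throughout this log come from the get_pmm_service_ip helper, which resolves the external address of the monitoring-service LoadBalancer (35.226.246.51 in this run) for the pmm-admin --server-url. A rough reconstruction from the trace follows; the real helper queries both the ip and hostname ingress fields (the sketch condenses that into a simple preference), and the "egrep is obsolescent" warnings above come from its egrep check:

    get_pmm_service_ip() {
        local service="$1"
        # Nothing to resolve if the service does not exist at all.
        if kubectl get "service/${service}" -o 'jsonpath={.spec.type}' 2>&1 | grep -q NotFound; then
            return 1
        fi
        # Wait until the LoadBalancer reports an ingress entry (ip or hostname).
        until kubectl get "service/${service}" -o 'jsonpath={.status.loadBalancer.ingress[]}' 2>/dev/null \
                | grep -Eq 'hostname|ip'; do
            sleep 1
        done
        # Prefer the ingress IP; fall back to the hostname (e.g. AWS ELBs).
        local ip
        ip=$(kubectl get "service/${service}" -o 'jsonpath={.status.loadBalancer.ingress[].ip}')
        if [ -n "${ip}" ]; then
            echo "${ip}"
        else
            kubectl get "service/${service}" -o 'jsonpath={.status.loadBalancer.ingress[].hostname}'
        fi
    }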
++++ cat /tmp/tmp.xCYmAYgvXG ++++ cat /tmp/tmp.KQAqTrFttF ++++ rm /tmp/tmp.xCYmAYgvXG /tmp/tmp.KQAqTrFttF ++++ return 0 ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.5MUoVTg3PA +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.ajKn7GbcKc ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.5MUoVTg3PA ++++ cat /tmp/tmp.ajKn7GbcKc ++++ rm /tmp/tmp.5MUoVTg3PA /tmp/tmp.ajKn7GbcKc ++++ return 0 +++ kubectl_bin exec -n monitoring-pmm3-8293 monitoring-server-0 -- pmm-admin --server-url=https://admin:admin@35.226.246.51/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.rsAg9hPkQn ++++ mktemp +++ local LAST_ERR=/tmp/tmp.xADwSd5np1 +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec -n monitoring-pmm3-8293 monitoring-server-0 -- pmm-admin --server-url=https://admin:admin@35.226.246.51/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.rsAg9hPkQn +++ cat /tmp/tmp.xADwSd5np1 command terminated with exit code 1 +++ sleep 0 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec -n monitoring-pmm3-8293 monitoring-server-0 -- pmm-admin --server-url=https://admin:admin@35.226.246.51/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.rsAg9hPkQn +++ cat /tmp/tmp.xADwSd5np1 command terminated with exit code 1 +++ sleep 4 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec -n monitoring-pmm3-8293 monitoring-server-0 -- pmm-admin --server-url=https://admin:admin@35.226.246.51/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.rsAg9hPkQn +++ cat /tmp/tmp.xADwSd5np1 command terminated with exit code 1 +++ sleep 8 +++ cat /tmp/tmp.rsAg9hPkQn +++ cat /tmp/tmp.xADwSd5np1 command terminated with exit code 1 +++ rm /tmp/tmp.rsAg9hPkQn /tmp/tmp.xADwSd5np1 +++ return 1 ++ for node_id in "${nodeList[@]}" ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-server-0 -- pmm-admin --server-url=https://admin:admin@$(get_pmm_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ grep 8a5d5ef5-605d-48e3-b7bf-781301542e7d +++ awk '{print $4}' ++++ get_pmm_service_ip monitoring-service ++++ local service=monitoring-service ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.spec.type}' ++++ grep -q NotFound ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[]}' ++++ egrep -q 'hostname|ip' egrep: warning: egrep is obsolescent; using grep -E ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.FzQdtxyW6h +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.kAlFPfPwuA ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++++ exit_status=0 ++++ set -e ++++ 
'[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.FzQdtxyW6h ++++ cat /tmp/tmp.kAlFPfPwuA ++++ rm /tmp/tmp.FzQdtxyW6h /tmp/tmp.kAlFPfPwuA ++++ return 0 ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.9giOY4Pcwi +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.38RKJbvDqT ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.9giOY4Pcwi ++++ cat /tmp/tmp.38RKJbvDqT ++++ rm /tmp/tmp.9giOY4Pcwi /tmp/tmp.38RKJbvDqT ++++ return 0 +++ kubectl_bin exec -n monitoring-pmm3-8293 monitoring-server-0 -- pmm-admin --server-url=https://admin:admin@35.226.246.51/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.OfnQMeDv4h ++++ mktemp +++ local LAST_ERR=/tmp/tmp.EwkfsFlMS8 +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec -n monitoring-pmm3-8293 monitoring-server-0 -- pmm-admin --server-url=https://admin:admin@35.226.246.51/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.OfnQMeDv4h +++ cat /tmp/tmp.EwkfsFlMS8 command terminated with exit code 1 +++ sleep 0 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec -n monitoring-pmm3-8293 monitoring-server-0 -- pmm-admin --server-url=https://admin:admin@35.226.246.51/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.OfnQMeDv4h +++ cat /tmp/tmp.EwkfsFlMS8 command terminated with exit code 1 +++ sleep 4 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec -n monitoring-pmm3-8293 monitoring-server-0 -- pmm-admin --server-url=https://admin:admin@35.226.246.51/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.OfnQMeDv4h +++ cat /tmp/tmp.EwkfsFlMS8 command terminated with exit code 1 +++ sleep 8 +++ cat /tmp/tmp.OfnQMeDv4h +++ cat /tmp/tmp.EwkfsFlMS8 command terminated with exit code 1 +++ rm /tmp/tmp.OfnQMeDv4h /tmp/tmp.EwkfsFlMS8 +++ return 1 ++ for node_id in "${nodeList[@]}" ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-server-0 -- pmm-admin --server-url=https://admin:admin@$(get_pmm_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ grep 93b57028-5902-43a3-afc2-ed5e72ce1d67 +++ awk '{print $4}' ++++ get_pmm_service_ip monitoring-service ++++ local service=monitoring-service ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.spec.type}' ++++ grep -q NotFound ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[]}' ++++ egrep -q 'hostname|ip' egrep: warning: egrep is obsolescent; using grep -E ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.dQnwkiRzMf +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.LYx7MataB7 ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' 
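Every kubectl and pmm-admin call in this log goes through the kubectl_bin wrapper, which is what produces the recurring mktemp / LAST_OUT / LAST_ERR lines and the sleep 0, sleep 4, sleep 8 backoff before a command is finally given up on. A minimal sketch reconstructed from the trace (the actual implementation in the test function library carries an extra guard in its failure test):

    kubectl_bin() {
        local LAST_OUT LAST_ERR exit_status=0 timeout=4
        LAST_OUT=$(mktemp)
        LAST_ERR=$(mktemp)
        for i in $(seq 0 2); do
            set +e
            kubectl "$@" >"${LAST_OUT}" 2>"${LAST_ERR}"
            exit_status=$?
            set -e
            if [ "${exit_status}" != 0 ]; then
                # Failed attempt: surface the captured output, then back off
                # 0s, 4s, 8s before the next try.
                cat "${LAST_OUT}"
                cat "${LAST_ERR}"
                sleep $((timeout * i))
            else
                break
            fi
        done
        cat "${LAST_OUT}"
        cat "${LAST_ERR}"
        rm "${LAST_OUT}" "${LAST_ERR}"
        return "${exit_status}"
    }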
++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.dQnwkiRzMf ++++ cat /tmp/tmp.LYx7MataB7 ++++ rm /tmp/tmp.dQnwkiRzMf /tmp/tmp.LYx7MataB7 ++++ return 0 ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.fjxK1WzWG8 +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.vnP0Hs2Sza ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.fjxK1WzWG8 ++++ cat /tmp/tmp.vnP0Hs2Sza ++++ rm /tmp/tmp.fjxK1WzWG8 /tmp/tmp.vnP0Hs2Sza ++++ return 0 +++ kubectl_bin exec -n monitoring-pmm3-8293 monitoring-server-0 -- pmm-admin --server-url=https://admin:admin@35.226.246.51/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.fXkRm92pLL ++++ mktemp +++ local LAST_ERR=/tmp/tmp.UqBsXtg8WQ +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec -n monitoring-pmm3-8293 monitoring-server-0 -- pmm-admin --server-url=https://admin:admin@35.226.246.51/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.fXkRm92pLL +++ cat /tmp/tmp.UqBsXtg8WQ command terminated with exit code 1 +++ sleep 0 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec -n monitoring-pmm3-8293 monitoring-server-0 -- pmm-admin --server-url=https://admin:admin@35.226.246.51/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.fXkRm92pLL +++ cat /tmp/tmp.UqBsXtg8WQ command terminated with exit code 1 +++ sleep 4 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec -n monitoring-pmm3-8293 monitoring-server-0 -- pmm-admin --server-url=https://admin:admin@35.226.246.51/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.fXkRm92pLL +++ cat /tmp/tmp.UqBsXtg8WQ command terminated with exit code 1 +++ sleep 8 +++ cat /tmp/tmp.fXkRm92pLL +++ cat /tmp/tmp.UqBsXtg8WQ command terminated with exit code 1 +++ rm /tmp/tmp.fXkRm92pLL /tmp/tmp.UqBsXtg8WQ +++ return 1 ++ for node_id in "${nodeList[@]}" ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-server-0 -- pmm-admin --server-url=https://admin:admin@$(get_pmm_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ grep 90bd3f9a-f477-423a-b7ff-fd5c15b29d14 +++ awk '{print $4}' ++++ get_pmm_service_ip monitoring-service ++++ local service=monitoring-service ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.spec.type}' ++++ grep -q NotFound ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[]}' ++++ egrep -q 'hostname|ip' egrep: warning: egrep is obsolescent; using grep -E ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.VBgfPLGYUH +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.3tnJ172s09 ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o 
'jsonpath={.status.loadBalancer.ingress[].ip}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.VBgfPLGYUH ++++ cat /tmp/tmp.3tnJ172s09 ++++ rm /tmp/tmp.VBgfPLGYUH /tmp/tmp.3tnJ172s09 ++++ return 0 ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.tE6zeK0JFD +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.hdfwjt3jBV ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.tE6zeK0JFD ++++ cat /tmp/tmp.hdfwjt3jBV ++++ rm /tmp/tmp.tE6zeK0JFD /tmp/tmp.hdfwjt3jBV ++++ return 0 +++ kubectl_bin exec -n monitoring-pmm3-8293 monitoring-server-0 -- pmm-admin --server-url=https://admin:admin@35.226.246.51/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.500cn1IrLf ++++ mktemp +++ local LAST_ERR=/tmp/tmp.ERULAKmnj3 +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec -n monitoring-pmm3-8293 monitoring-server-0 -- pmm-admin --server-url=https://admin:admin@35.226.246.51/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.500cn1IrLf +++ cat /tmp/tmp.ERULAKmnj3 command terminated with exit code 1 +++ sleep 0 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec -n monitoring-pmm3-8293 monitoring-server-0 -- pmm-admin --server-url=https://admin:admin@35.226.246.51/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.500cn1IrLf +++ cat /tmp/tmp.ERULAKmnj3 command terminated with exit code 1 +++ sleep 4 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec -n monitoring-pmm3-8293 monitoring-server-0 -- pmm-admin --server-url=https://admin:admin@35.226.246.51/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.500cn1IrLf +++ cat /tmp/tmp.ERULAKmnj3 command terminated with exit code 1 +++ sleep 8 +++ cat /tmp/tmp.500cn1IrLf +++ cat /tmp/tmp.ERULAKmnj3 command terminated with exit code 1 +++ rm /tmp/tmp.500cn1IrLf /tmp/tmp.ERULAKmnj3 +++ return 1 ++ for node_id in "${nodeList[@]}" ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-server-0 -- pmm-admin --server-url=https://admin:admin@$(get_pmm_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ grep 3e7ec5ac-6ceb-4497-a6f9-18b53dbbe423 +++ awk '{print $4}' ++++ get_pmm_service_ip monitoring-service ++++ local service=monitoring-service ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.spec.type}' ++++ grep -q NotFound ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[]}' ++++ egrep -q 'hostname|ip' egrep: warning: egrep is obsolescent; using grep -E ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.FPlqKDJBf4 +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.cCOKlgeMbR ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ 
kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.FPlqKDJBf4 ++++ cat /tmp/tmp.cCOKlgeMbR ++++ rm /tmp/tmp.FPlqKDJBf4 /tmp/tmp.cCOKlgeMbR ++++ return 0 ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.AV2npYfQMT +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.GmYBOyzm88 ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.AV2npYfQMT ++++ cat /tmp/tmp.GmYBOyzm88 ++++ rm /tmp/tmp.AV2npYfQMT /tmp/tmp.GmYBOyzm88 ++++ return 0 +++ kubectl_bin exec -n monitoring-pmm3-8293 monitoring-server-0 -- pmm-admin --server-url=https://admin:admin@35.226.246.51/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.Y5FzPs2K32 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.mkaKNhwSl1 +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec -n monitoring-pmm3-8293 monitoring-server-0 -- pmm-admin --server-url=https://admin:admin@35.226.246.51/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.Y5FzPs2K32 +++ cat /tmp/tmp.mkaKNhwSl1 command terminated with exit code 1 +++ sleep 0 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec -n monitoring-pmm3-8293 monitoring-server-0 -- pmm-admin --server-url=https://admin:admin@35.226.246.51/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.Y5FzPs2K32 +++ cat /tmp/tmp.mkaKNhwSl1 command terminated with exit code 1 +++ sleep 4 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec -n monitoring-pmm3-8293 monitoring-server-0 -- pmm-admin --server-url=https://admin:admin@35.226.246.51/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.Y5FzPs2K32 +++ cat /tmp/tmp.mkaKNhwSl1 command terminated with exit code 1 +++ sleep 8 +++ cat /tmp/tmp.Y5FzPs2K32 +++ cat /tmp/tmp.mkaKNhwSl1 command terminated with exit code 1 +++ rm /tmp/tmp.Y5FzPs2K32 /tmp/tmp.mkaKNhwSl1 +++ return 1 ++ echo + [[ -n '' ]] ++ kubectl_bin logs monitoring-pmm3-rs0-0 pmm-client ++ grep -c 'cannot auto discover databases and collections' +++ mktemp ++ local LAST_OUT=/tmp/tmp.VJ7ZV6G062 +++ mktemp ++ local LAST_ERR=/tmp/tmp.t25lGfbBED ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl logs monitoring-pmm3-rs0-0 pmm-client ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.VJ7ZV6G062 ++ cat /tmp/tmp.t25lGfbBED error: error from server (NotFound): pods "monitoring-pmm3-rs0-0" not found in namespace "monitoring-pmm3-8293" ++ sleep 0 ++ for i in $(seq 0 2) ++ set +e ++ kubectl logs monitoring-pmm3-rs0-0 pmm-client ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.VJ7ZV6G062 ++ cat /tmp/tmp.t25lGfbBED error: error from server (NotFound): pods "monitoring-pmm3-rs0-0" not found in namespace "monitoring-pmm3-8293" ++ sleep 4 ++ for i in $(seq 0 2) ++ set +e ++ kubectl logs monitoring-pmm3-rs0-0 pmm-client ++ 
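The long block above is the does_node_id_exists verification: for each node ID that was registered while the cluster was running, the test asks the PMM server inventory whether a CONTAINER_NODE with that ID still exists, and expects the collected list to be empty once the cluster is paused, which is the [[ -n '' ]] check. In this run every pmm-admin exec exits non-zero even after the retries, so nothing is appended and the check passes. Reconstructed from the trace, the helper looks roughly like this (the assertion in the trailing comment is a sketch, not the literal script):

    namespace=monitoring-pmm3-8293   # namespace used in this run

    does_node_id_exists() {
        local -a nodeList=("$@")
        local -a nodeList_from_pmm=()
        local node_id
        for node_id in "${nodeList[@]}"; do
            # Ask the PMM inventory for a CONTAINER_NODE with this ID; column 4
            # of the pmm-admin table is the node ID, so a match is collected.
            nodeList_from_pmm+=($(kubectl exec -n "${namespace}" monitoring-server-0 -- \
                pmm-admin --server-url="https://admin:admin@$(get_pmm_service_ip monitoring-service)/" \
                --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE \
                | grep "${node_id}" | awk '{print $4}'))
        done
        echo "${nodeList_from_pmm[@]}"
    }

    # Sketch of the caller's assertion:
    #   does_node_id_exists_in_pmm=($(does_node_id_exists "${nodeList[@]}"))
    #   [[ -n "${does_node_id_exists_in_pmm[*]}" ]] && exit 1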
exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.VJ7ZV6G062 ++ cat /tmp/tmp.t25lGfbBED error: error from server (NotFound): pods "monitoring-pmm3-rs0-0" not found in namespace "monitoring-pmm3-8293" ++ sleep 8 ++ cat /tmp/tmp.VJ7ZV6G062 ++ cat /tmp/tmp.t25lGfbBED error: error from server (NotFound): pods "monitoring-pmm3-rs0-0" not found in namespace "monitoring-pmm3-8293" ++ rm /tmp/tmp.VJ7ZV6G062 /tmp/tmp.t25lGfbBED ++ return 1 + [[ 0 != 0 ]] + helm uninstall monitoring release "monitoring" uninstalled + destroy monitoring-pmm3-8293 + local namespace=monitoring-pmm3-8293 + local ignore_logs=true + [[ 0 == 1 ]] + desc 'destroy cluster/operator and all other resources' + set +o xtrace ----------------------------------------------------------------------------------- destroy cluster/operator and all other resources ----------------------------------------------------------------------------------- + '[' true == false ']' + delete_backups + desc 'Delete psmdb-backup' + set +o xtrace ----------------------------------------------------------------------------------- Delete psmdb-backup ----------------------------------------------------------------------------------- ++ kubectl_bin get psmdb-backup --no-headers ++ wc -l +++ mktemp ++ local LAST_OUT=/tmp/tmp.tlbtCkqZhO +++ mktemp ++ local LAST_ERR=/tmp/tmp.PTuQKFEw1n ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb-backup --no-headers ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.tlbtCkqZhO ++ cat /tmp/tmp.PTuQKFEw1n No resources found in monitoring-pmm3-8293 namespace. ++ rm /tmp/tmp.tlbtCkqZhO /tmp/tmp.PTuQKFEw1n ++ return 0 + '[' 0 '!=' 0 ']' + delete_crd + desc 'get and delete old CRDs and RBAC' + set +o xtrace ----------------------------------------------------------------------------------- get and delete old CRDs and RBAC ----------------------------------------------------------------------------------- + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1850/deploy/crd.yaml --ignore-not-found --wait=false ++ mktemp + local LAST_OUT=/tmp/tmp.8UEv0BcmEy ++ mktemp + local LAST_ERR=/tmp/tmp.TLfvLacc5x + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1850/deploy/crd.yaml --ignore-not-found --wait=false + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.8UEv0BcmEy customresourcedefinition.apiextensions.k8s.io "perconaservermongodbbackups.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbrestores.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbs.psmdb.percona.com" deleted + cat /tmp/tmp.TLfvLacc5x + rm /tmp/tmp.8UEv0BcmEy /tmp/tmp.TLfvLacc5x + return 0 ++ yq eval .metadata.name /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1850/deploy/crd.yaml ++ grep -v '\-\-\-' grep: warning: stray \ before - grep: warning: stray \ before - + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + kubectl get perconaservermongodbbackups.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbbackups.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbbackups" + kubectl patch perconaservermongodbbackups.psmdb.percona.com 
-n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbbackups" + : + kubectl_bin wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.VN8ltAi2fI ++ mktemp + local LAST_ERR=/tmp/tmp.rsbrv5vCJx + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.VN8ltAi2fI + cat /tmp/tmp.rsbrv5vCJx + rm /tmp/tmp.VN8ltAi2fI /tmp/tmp.rsbrv5vCJx + return 0 + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + kubectl get perconaservermongodbrestores.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbrestores.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbrestores" + kubectl patch perconaservermongodbrestores.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbrestores" + : + kubectl_bin wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.lX5NEEFgPp ++ mktemp + local LAST_ERR=/tmp/tmp.CKMLys7g9q + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.lX5NEEFgPp + cat /tmp/tmp.CKMLys7g9q + rm /tmp/tmp.lX5NEEFgPp /tmp/tmp.CKMLys7g9q + return 0 + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + kubectl get perconaservermongodbs.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbs.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' No resources found + kubectl patch perconaservermongodbs.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbs" + : + kubectl_bin wait --for=delete crd perconaservermongodbs.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.LxgPAmIfTc ++ mktemp + local LAST_ERR=/tmp/tmp.473ciAEWCl + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbs.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.LxgPAmIfTc + cat /tmp/tmp.473ciAEWCl + rm /tmp/tmp.LxgPAmIfTc /tmp/tmp.473ciAEWCl + return 0 + local rbac_yaml=rbac.yaml + '[' -n psmdb-operator ']' + rbac_yaml=cw-rbac.yaml + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1850/deploy/cw-rbac.yaml --ignore-not-found ++ mktemp + local LAST_OUT=/tmp/tmp.PjW5NkzON3 ++ mktemp + local LAST_ERR=/tmp/tmp.LJYAmJNH9i + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1850/deploy/cw-rbac.yaml --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.PjW5NkzON3 clusterrole.rbac.authorization.k8s.io "percona-server-mongodb-operator" deleted clusterrolebinding.rbac.authorization.k8s.io "service-account-percona-server-mongodb-operator" deleted + cat 
/tmp/tmp.LJYAmJNH9i + rm /tmp/tmp.PjW5NkzON3 /tmp/tmp.LJYAmJNH9i + return 0 + destroy_cert_manager + kubectl_bin delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.OOhzKuRfWN ++ mktemp + local LAST_ERR=/tmp/tmp.5k7TInVA5O + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.OOhzKuRfWN namespace "cert-manager" deleted customresourcedefinition.apiextensions.k8s.io "challenges.acme.cert-manager.io" deleted customresourcedefinition.apiextensions.k8s.io "orders.acme.cert-manager.io" deleted customresourcedefinition.apiextensions.k8s.io "certificaterequests.cert-manager.io" deleted customresourcedefinition.apiextensions.k8s.io "certificates.cert-manager.io" deleted customresourcedefinition.apiextensions.k8s.io "clusterissuers.cert-manager.io" deleted customresourcedefinition.apiextensions.k8s.io "issuers.cert-manager.io" deleted serviceaccount "cert-manager-cainjector" deleted from cert-manager namespace serviceaccount "cert-manager" deleted from cert-manager namespace serviceaccount "cert-manager-webhook" deleted from cert-manager namespace clusterrole.rbac.authorization.k8s.io "cert-manager-cainjector" deleted clusterrole.rbac.authorization.k8s.io "cert-manager-controller-issuers" deleted clusterrole.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" deleted clusterrole.rbac.authorization.k8s.io "cert-manager-controller-certificates" deleted clusterrole.rbac.authorization.k8s.io "cert-manager-controller-orders" deleted clusterrole.rbac.authorization.k8s.io "cert-manager-controller-challenges" deleted clusterrole.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" deleted clusterrole.rbac.authorization.k8s.io "cert-manager-cluster-view" deleted clusterrole.rbac.authorization.k8s.io "cert-manager-view" deleted clusterrole.rbac.authorization.k8s.io "cert-manager-edit" deleted clusterrole.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" deleted clusterrole.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" deleted clusterrole.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" deleted clusterrolebinding.rbac.authorization.k8s.io "cert-manager-cainjector" deleted clusterrolebinding.rbac.authorization.k8s.io "cert-manager-controller-issuers" deleted clusterrolebinding.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" deleted clusterrolebinding.rbac.authorization.k8s.io "cert-manager-controller-certificates" deleted clusterrolebinding.rbac.authorization.k8s.io "cert-manager-controller-orders" deleted clusterrolebinding.rbac.authorization.k8s.io "cert-manager-controller-challenges" deleted clusterrolebinding.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" deleted clusterrolebinding.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" deleted clusterrolebinding.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" deleted clusterrolebinding.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" deleted role.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" deleted from kube-system namespace role.rbac.authorization.k8s.io "cert-manager:leaderelection" deleted from kube-system namespace 
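The teardown traced above (delete_crd, the cw-rbac removal, and the cert-manager manifest deletion whose output continues below) follows one pattern: delete the CRDs without waiting, strip finalizers from any leftover custom resources so deletion cannot hang, wait for the CRDs to be gone, then remove cluster-wide RBAC and cert-manager. A condensed sketch with names and paths taken from the trace; the grep pattern uses "--" instead of the backslash-escaped dashes that trigger the "stray \ before -" warnings seen in this log:

    src_dir=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1850   # checkout used in this run

    # Delete the CRDs without waiting, then strip finalizers from any leftover
    # custom resources so the CRD deletion cannot hang on them.
    kubectl delete -f "${src_dir}/deploy/crd.yaml" --ignore-not-found --wait=false

    for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v -- '---'); do
        kubectl get "${crd_name}" --all-namespaces -o wide \
            | grep -v NAMESPACE \
            | xargs -L 1 sh -xc "kubectl patch ${crd_name} -n \$0 \$1 --type=merge -p '{\"metadata\":{\"finalizers\":[]}}'" \
            || :
        kubectl wait --for=delete crd "${crd_name}"
    done

    # Cluster-wide RBAC and cert-manager go last; NotFound errors on re-runs
    # are expected, which is why the retry wrapper tolerates them here.
    kubectl delete -f "${src_dir}/deploy/cw-rbac.yaml" --ignore-not-found
    kubectl delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml || :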
role.rbac.authorization.k8s.io "cert-manager-tokenrequest" deleted from cert-manager namespace role.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" deleted from cert-manager namespace rolebinding.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" deleted from kube-system namespace rolebinding.rbac.authorization.k8s.io "cert-manager:leaderelection" deleted from kube-system namespace service "cert-manager-cainjector" deleted from cert-manager namespace service "cert-manager" deleted from cert-manager namespace service "cert-manager-webhook" deleted from cert-manager namespace mutatingwebhookconfiguration.admissionregistration.k8s.io "cert-manager-webhook" deleted validatingwebhookconfiguration.admissionregistration.k8s.io "cert-manager-webhook" deleted + cat /tmp/tmp.5k7TInVA5O Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": deployments.apps "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": deployments.apps "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": deployments.apps "cert-manager-webhook" not found + sleep 0 + for i in $(seq 0 2) + set +e + kubectl delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.OOhzKuRfWN namespace "cert-manager" deleted + cat /tmp/tmp.5k7TInVA5O Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "challenges.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "orders.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificaterequests.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificates.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "clusterissuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "issuers.cert-manager.io" not found Error from server (NotFound): error when 
deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": serviceaccounts "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": serviceaccounts "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": serviceaccounts "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cluster-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-edit" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting 
"https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting 
"https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": services "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": services "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": services "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": deployments.apps "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": deployments.apps "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": deployments.apps "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": mutatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": validatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found + sleep 4 + for i in $(seq 0 2) + set +e + kubectl delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.OOhzKuRfWN + cat /tmp/tmp.5k7TInVA5O Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": namespaces "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "challenges.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "orders.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificaterequests.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io 
"certificates.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "clusterissuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "issuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": serviceaccounts "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": serviceaccounts "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": serviceaccounts "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cluster-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-edit" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server 
(NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting 
"https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": services "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": services "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": services "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": deployments.apps "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": deployments.apps "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": deployments.apps "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": mutatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": validatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found + sleep 8 + cat /tmp/tmp.OOhzKuRfWN + cat /tmp/tmp.5k7TInVA5O Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": namespaces "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "challenges.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "orders.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io 
"certificaterequests.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificates.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "clusterissuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "issuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": serviceaccounts "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": serviceaccounts "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": serviceaccounts "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cluster-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-edit" not found Error from server 
(NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error 
when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": services "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": services "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": services "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": deployments.apps "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": deployments.apps "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": deployments.apps "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": mutatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": validatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found + rm /tmp/tmp.OOhzKuRfWN /tmp/tmp.5k7TInVA5O + return 1 + true + '[' -n '' ']' + '[' -n psmdb-operator ']' + rm -rf /tmp/tmp.68JekPG774 + kubectl_bin delete --grace-period=0 --force=true namespace monitoring-pmm3-8293 + kubectl_bin delete --grace-period=0 --force=true namespace psmdb-operator ++ mktemp ++ mktemp + desc 'test passed' + set +o xtrace ----------------------------------------------------------------------------------- test passed ----------------------------------------------------------------------------------- + local LAST_OUT=/tmp/tmp.PoG8jdmnwj ++ mktemp + local LAST_ERR=/tmp/tmp.xGyYRGEK7R + local exit_status=0 + local timeout=4 ++ seq 0 2 
+ local LAST_OUT=/tmp/tmp.1XBGzwSECi
+ for i in $(seq 0 2)
++ mktemp
+ set +e
+ kubectl delete --grace-period=0 --force=true namespace psmdb-operator
+ local LAST_ERR=/tmp/tmp.Ka2cyH8Eve
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ kubectl delete --grace-period=0 --force=true namespace monitoring-pmm3-8293
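
Every kubectl_bin call in this teardown follows the same retry pattern: capture stdout and stderr into mktemp files, retry the kubectl command up to three times with sleeps of 0, 4 and 8 seconds after failures, print both captures, and return the last exit status. The helper below is only an approximate reconstruction of that pattern as it appears in the trace, not the actual e2e-tests function.

kubectl_bin() {
    local LAST_OUT LAST_ERR exit_status=0 timeout=4
    LAST_OUT=$(mktemp)
    LAST_ERR=$(mktemp)
    for i in $(seq 0 2); do
        set +e
        kubectl "$@" >"$LAST_OUT" 2>"$LAST_ERR"
        exit_status=$?
        set -e
        if [ "$exit_status" -eq 0 ]; then
            break
        fi
        sleep $((timeout * i))    # 0s, 4s, 8s between attempts, as seen in the trace
    done
    cat "$LAST_OUT"
    cat "$LAST_ERR"
    rm "$LAST_OUT" "$LAST_ERR"
    return "$exit_status"
}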
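
The per-CRD loop earlier in this section tolerates every failure on purpose: the "error: the server doesn't have a resource type" and "No resources found" messages are expected once the CRDs are already gone, and the odd-looking "kubectl patch ... -n sh" invocations appear because GNU xargs still runs sh -c once on empty input, so $0 falls back to the shell name "sh"; the "+ :" no-ops swallow the resulting errors. The "grep: warning: stray \ before -" noise comes from the escaped dashes in grep -v '\-\-\-'. A sketch of the same finalizer-clearing pattern with an unescaped pattern, xargs -r for the empty-input case, and the tolerated errors made explicit (${src_dir} is the operator checkout, as in the trace):

for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '^---$'); do
    # Strip finalizers from any leftover custom resources so the CRD can be removed;
    # -r skips the empty-input run that produced the stray "-n sh" patches above.
    kubectl get "$crd_name" --all-namespaces -o wide \
        | grep -v NAMESPACE \
        | xargs -r -L 1 sh -xc "kubectl patch $crd_name -n \$0 \$1 --type=merge -p '{\"metadata\":{\"finalizers\":[]}}'" \
        || :
    # Block until the CRD itself is gone before moving on.
    kubectl wait --for=delete crd "$crd_name" || :
done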
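
The three retries against the cert-manager manifest produce the long NotFound dumps above because the first pass already removed almost everything and the delete is issued without --ignore-not-found; the wrapper finally returns 1 and the caller tolerates it ("+ return 1" followed by "+ true"). The interleaved trace of the last two deletes suggests the namespaces are removed in parallel. Below is a quieter sketch of the same cleanup, reusing the manifest URL and namespace names from this run and the --ignore-not-found flag the script already uses for crd.yaml and cw-rbac.yaml; it is an illustration, not the script's actual destroy logic.

# Delete the cert-manager release manifest; --ignore-not-found keeps repeat runs silent.
kubectl delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml \
    --ignore-not-found || true

# Force-delete the test namespaces in parallel, mirroring the interleaved trace above.
kubectl delete --grace-period=0 --force=true namespace monitoring-pmm3-8293 &
kubectl delete --grace-period=0 --force=true namespace psmdb-operator &
wait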