Log: /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2287/e2e-tests/logs/monitoring-2-0.log Warning: version difference between client (1.35) and server (1.32) exceeds the supported minor version skew of +/-1 Warning: version difference between client (1.35) and server (1.32) exceeds the supported minor version skew of +/-1 Warning: version difference between client (1.35) and server (1.32) exceeds the supported minor version skew of +/-1 + create_infra monitoring-2-0-17023 + local ns=monitoring-2-0-17023 + [[ 1 == 1 ]] + delete_crd + desc 'get and delete old CRDs and RBAC' + set +o xtrace ----------------------------------------------------------------------------------- get and delete old CRDs and RBAC ----------------------------------------------------------------------------------- + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2287/deploy/crd.yaml --ignore-not-found --wait=false ++ mktemp + local LAST_OUT=/tmp/tmp.K8ZtZrcDoS ++ mktemp + local LAST_ERR=/tmp/tmp.WJpnCPdsWf + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2287/deploy/crd.yaml --ignore-not-found --wait=false + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.K8ZtZrcDoS customresourcedefinition.apiextensions.k8s.io "perconaservermongodbbackups.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbrestores.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbs.psmdb.percona.com" deleted + cat /tmp/tmp.WJpnCPdsWf + rm /tmp/tmp.K8ZtZrcDoS /tmp/tmp.WJpnCPdsWf + return 0 ++ grep -v '\-\-\-' ++ yq eval .metadata.name /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2287/deploy/crd.yaml grep: warning: stray \ before - grep: warning: stray \ before - + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + kubectl get perconaservermongodbbackups.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbbackups.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbbackups" + kubectl patch perconaservermongodbbackups.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbbackups" + : + kubectl_bin wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.qeSOUm04rC ++ mktemp + local LAST_ERR=/tmp/tmp.lxca0uE5Zy + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.qeSOUm04rC + cat /tmp/tmp.lxca0uE5Zy + rm /tmp/tmp.qeSOUm04rC /tmp/tmp.lxca0uE5Zy + return 0 + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + kubectl get perconaservermongodbrestores.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbrestores.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbrestores" + kubectl patch perconaservermongodbrestores.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't 
have a resource type "perconaservermongodbrestores" + : + kubectl_bin wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.z1Z9figLlz ++ mktemp + local LAST_ERR=/tmp/tmp.KGfi1dL3jo + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.z1Z9figLlz + cat /tmp/tmp.KGfi1dL3jo + rm /tmp/tmp.z1Z9figLlz /tmp/tmp.KGfi1dL3jo + return 0 + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + kubectl get perconaservermongodbs.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbs.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbs" + kubectl patch perconaservermongodbs.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbs" + : + kubectl_bin wait --for=delete crd perconaservermongodbs.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.N85IQHvYsq ++ mktemp + local LAST_ERR=/tmp/tmp.eSD9xTl9km + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbs.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.N85IQHvYsq + cat /tmp/tmp.eSD9xTl9km + rm /tmp/tmp.N85IQHvYsq /tmp/tmp.eSD9xTl9km + return 0 + local rbac_yaml=rbac.yaml + '[' -n psmdb-operator ']' + rbac_yaml=cw-rbac.yaml + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2287/deploy/cw-rbac.yaml --ignore-not-found ++ mktemp + local LAST_OUT=/tmp/tmp.0qtTs8HykS ++ mktemp + local LAST_ERR=/tmp/tmp.JLd8qQMADm + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2287/deploy/cw-rbac.yaml --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.0qtTs8HykS clusterrole.rbac.authorization.k8s.io "percona-server-mongodb-operator" deleted clusterrolebinding.rbac.authorization.k8s.io "service-account-percona-server-mongodb-operator" deleted + cat /tmp/tmp.JLd8qQMADm + rm /tmp/tmp.0qtTs8HykS /tmp/tmp.JLd8qQMADm + return 0 + check_crd_for_deletion PR-2287-ee944353 + local git_tag=PR-2287-ee944353 ++ curl -s https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/PR-2287-ee944353/deploy/crd.yaml ++ yq eval .metadata.name ++ /usr/sbin/sed ':a;N;$!ba;s/\n/ /g' ++ /usr/sbin/sed s/---//g + for crd_name in $(curl -s https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/${git_tag}/deploy/crd.yaml | yq eval '.metadata.name' | $sed 's/---//g' | $sed ':a;N;$!ba;s/\n/ /g') ++ kubectl_bin get crd/null -o 'jsonpath={.status.conditions[-1].type}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.oQ7rEGRe4v +++ mktemp ++ local LAST_ERR=/tmp/tmp.x9SSwftfaU ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}' ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.oQ7rEGRe4v ++ cat /tmp/tmp.x9SSwftfaU Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ sleep 0 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get 
crd/null -o 'jsonpath={.status.conditions[-1].type}' ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.oQ7rEGRe4v ++ cat /tmp/tmp.x9SSwftfaU Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ sleep 4 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}' ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.oQ7rEGRe4v ++ cat /tmp/tmp.x9SSwftfaU Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ sleep 8 ++ cat /tmp/tmp.oQ7rEGRe4v ++ cat /tmp/tmp.x9SSwftfaU Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ rm /tmp/tmp.oQ7rEGRe4v /tmp/tmp.x9SSwftfaU ++ return 1 + [[ '' == \T\e\r\m\i\n\a\t\i\n\g ]] + '[' -n psmdb-operator ']' + create_namespace psmdb-operator + local namespace=psmdb-operator + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + destroy_chaos_mesh ++ helm list --all-namespaces --filter chaos-mesh ++ tail -n1 ++ awk '-F ' '{print $2}' ++ sed s/NAMESPACE// + local chaos_mesh_ns= + desc 'destroy chaos-mesh' + set +o xtrace ----------------------------------------------------------------------------------- destroy chaos-mesh ----------------------------------------------------------------------------------- + '[' -n '' ']' ++ awk '{print $1}' ++ kubectl get MutatingWebhookConfiguration ++ grep chaos-mesh + timeout 30 kubectl delete MutatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ awk '{print $1}' ++ grep chaos-mesh + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep validate-auth ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl api-resources ++ awk '{print $1}' ++ grep chaos-mesh ++ awk '{print $1}' ++ grep chaos-mesh.org ++ kubectl get crd + timeout 30 kubectl delete crd error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrolebinding ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrolebinding error: resource(s) were provided, but no name was specified + : ++ grep chaos-mesh ++ kubectl get clusterrole ++ awk '{print $1}' + timeout 30 kubectl delete clusterrole error: resource(s) were provided, but no name was specified + : + desc 'cleaned up all old namespaces' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up all old namespaces ----------------------------------------------------------------------------------- + kubectl_bin get ns + grep -E -v '^kube-|^default|Terminating|psmdb-operator|openshift|^gke-|^gmp-|^NAME' ++ mktemp + awk '{print$1}' + '[' -n '' ']' + desc 'cleaned up old namespaces psmdb-operator' + xargs kubectl delete ns + set +o xtrace ----------------------------------------------------------------------------------- cleaned up old namespaces psmdb-operator ----------------------------------------------------------------------------------- + kubectl_bin delete namespace psmdb-operator --ignore-not-found + local LAST_OUT=/tmp/tmp.Xtt86sdbB8 ++ mktemp ++ mktemp + local LAST_ERR=/tmp/tmp.FbRDUa9xfW + local exit_status=0 + local timeout=4 + local LAST_OUT=/tmp/tmp.mwYZL2E0Ya ++ seq 0 2 ++ mktemp + for i in 
$(seq 0 2) + set +e + kubectl get ns + local LAST_ERR=/tmp/tmp.A3Nzuqujaz + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete namespace psmdb-operator --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Xtt86sdbB8 + cat /tmp/tmp.FbRDUa9xfW + rm /tmp/tmp.Xtt86sdbB8 /tmp/tmp.FbRDUa9xfW + return 0 namespace "cert-manager" deleted namespace "monitoring-2-0-23184" deleted + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.mwYZL2E0Ya namespace "psmdb-operator" deleted + cat /tmp/tmp.A3Nzuqujaz + rm /tmp/tmp.mwYZL2E0Ya /tmp/tmp.A3Nzuqujaz + return 0 + kubectl_bin wait --for=delete namespace psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.D0gVXrZASi ++ mktemp + local LAST_ERR=/tmp/tmp.ToBBOfE4Sa + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete namespace psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.D0gVXrZASi + cat /tmp/tmp.ToBBOfE4Sa + rm /tmp/tmp.D0gVXrZASi /tmp/tmp.ToBBOfE4Sa + return 0 + desc 'create namespace psmdb-operator' + set +o xtrace ----------------------------------------------------------------------------------- create namespace psmdb-operator ----------------------------------------------------------------------------------- + kubectl_bin create namespace psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.J5le3OHpkw ++ mktemp + local LAST_ERR=/tmp/tmp.ynQpLXXeEY + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl create namespace psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.J5le3OHpkw namespace/psmdb-operator created + cat /tmp/tmp.ynQpLXXeEY + rm /tmp/tmp.J5le3OHpkw /tmp/tmp.ynQpLXXeEY + return 0 + set_kube_ctx psmdb-operator + local namespace=psmdb-operator ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.890VUyGZwg +++ mktemp ++ local LAST_ERR=/tmp/tmp.OqHTkZ0ssZ ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.890VUyGZwg ++ cat /tmp/tmp.OqHTkZ0ssZ ++ rm /tmp/tmp.890VUyGZwg /tmp/tmp.OqHTkZ0ssZ ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-2287-ee944353-4-cluster13 --namespace=psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.MwztfRZEuh ++ mktemp + local LAST_ERR=/tmp/tmp.m7VFirPj6t + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-2287-ee944353-4-cluster13 --namespace=psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.MwztfRZEuh Context "gke_cloud-dev-112233_us-central1-a_jen-psmdb-2287-ee944353-4-cluster13" modified. 
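-----------------------------------------------------------------------------------
sketch: the kubectl_bin retry wrapper (reconstructed from the trace)
-----------------------------------------------------------------------------------
Nearly every kubectl call in this log runs through the suite's kubectl_bin wrapper, whose expansion (mktemp, seq 0 2, set +e/set -e, cat, rm, return) repeats throughout. A minimal sketch inferred from that trace; the exact retry condition is simplified, and the back-off arithmetic is an assumption consistent with the observed "sleep 0", "sleep 4", "sleep 8":

kubectl_bin() {
    local LAST_OUT LAST_ERR exit_status=0 timeout=4
    LAST_OUT=$(mktemp)
    LAST_ERR=$(mktemp)
    for i in $(seq 0 2); do
        set +e
        kubectl "$@" >"$LAST_OUT" 2>"$LAST_ERR"   # capture both streams
        exit_status=$?
        set -e
        if [ "$exit_status" != 0 ]; then
            cat "$LAST_OUT"
            cat "$LAST_ERR" >&2
            sleep $((timeout * i))                # 0s, 4s, 8s as seen above
        else
            break
        fi
    done
    cat "$LAST_OUT"
    cat "$LAST_ERR" >&2
    rm -f "$LAST_OUT" "$LAST_ERR"
    return $exit_status
}
-----------------------------------------------------------------------------------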
+ cat /tmp/tmp.m7VFirPj6t + rm /tmp/tmp.MwztfRZEuh /tmp/tmp.m7VFirPj6t + return 0 + deploy_operator + desc 'start PSMDB operator: docker.io/perconalab/percona-server-mongodb-operator:PR-2287-ee944353' + set +o xtrace ----------------------------------------------------------------------------------- start PSMDB operator: docker.io/perconalab/percona-server-mongodb-operator:PR-2287-ee944353 ----------------------------------------------------------------------------------- + local cr_file + '[' -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2287/e2e-tests/monitoring-2-0/conf/crd.yaml ']' + cr_file=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2287/deploy/crd.yaml + kubectl_bin apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2287/deploy/crd.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.xBRd2KtoQm ++ mktemp + local LAST_ERR=/tmp/tmp.XvdGu0aLr0 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2287/deploy/crd.yaml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.xBRd2KtoQm customresourcedefinition.apiextensions.k8s.io/perconaservermongodbbackups.psmdb.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaservermongodbrestores.psmdb.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaservermongodbs.psmdb.percona.com serverside-applied + cat /tmp/tmp.XvdGu0aLr0 + rm /tmp/tmp.xBRd2KtoQm /tmp/tmp.XvdGu0aLr0 + return 0 + '[' -n psmdb-operator ']' + apply_rbac cw-rbac + local operator_namespace=psmdb-operator + local rbac=cw-rbac + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2287/deploy/cw-rbac.yaml + sed -e 's^namespace: .*^namespace: psmdb-operator^' + kubectl_bin apply -n psmdb-operator -f - ++ mktemp + local LAST_OUT=/tmp/tmp.OjfXkHHBGj ++ mktemp + local LAST_ERR=/tmp/tmp.93t2uVjJTB + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -n psmdb-operator -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.OjfXkHHBGj clusterrole.rbac.authorization.k8s.io/percona-server-mongodb-operator created serviceaccount/percona-server-mongodb-operator created clusterrolebinding.rbac.authorization.k8s.io/service-account-percona-server-mongodb-operator created + cat /tmp/tmp.93t2uVjJTB + rm /tmp/tmp.OjfXkHHBGj /tmp/tmp.93t2uVjJTB + return 0 + yq eval ' (.spec.template.spec.containers[].image = "docker.io/perconalab/percona-server-mongodb-operator:PR-2287-ee944353") | ((.. | select(.[] == "DISABLE_TELEMETRY")) |= .value="true") | ((.. 
| select(.[] == "LOG_LEVEL")) |= .value="DEBUG")' /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2287/deploy/cw-operator.yaml + kubectl_bin apply -n psmdb-operator -f - ++ mktemp + local LAST_OUT=/tmp/tmp.3jpjqjg1p6 ++ mktemp + local LAST_ERR=/tmp/tmp.ai0La1TnFo + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -n psmdb-operator -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.3jpjqjg1p6 deployment.apps/percona-server-mongodb-operator created + cat /tmp/tmp.ai0La1TnFo + rm /tmp/tmp.3jpjqjg1p6 /tmp/tmp.ai0La1TnFo + return 0 + sleep 20 ++ get_operator_pod ++ kubectl_bin get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.k4KMWgLxxA +++ mktemp ++ local LAST_ERR=/tmp/tmp.7AcoffohNs ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.k4KMWgLxxA ++ cat /tmp/tmp.7AcoffohNs ++ rm /tmp/tmp.k4KMWgLxxA /tmp/tmp.7AcoffohNs ++ return 0 + wait_operator_pod percona-server-mongodb-operator-5467dbf546-fr85n + local pod=percona-server-mongodb-operator-5467dbf546-fr85n + set +o xtrace waiting for pod/percona-server-mongodb-operator-5467dbf546-fr85n to be ready.OK + echo 'Print operator info from log' Print operator info from log + grep 'Manager starting up' ++ get_operator_pod ++ kubectl_bin get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.rKgixr6AGl +++ mktemp ++ local LAST_ERR=/tmp/tmp.5ePx0j1wsA ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.rKgixr6AGl ++ cat /tmp/tmp.5ePx0j1wsA ++ rm /tmp/tmp.rKgixr6AGl /tmp/tmp.5ePx0j1wsA ++ return 0 + kubectl_bin logs -n psmdb-operator percona-server-mongodb-operator-5467dbf546-fr85n ++ mktemp + local LAST_OUT=/tmp/tmp.ZB2Zytxgar ++ mktemp + local LAST_ERR=/tmp/tmp.ePR46WPK0d + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl logs -n psmdb-operator percona-server-mongodb-operator-5467dbf546-fr85n + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.ZB2Zytxgar + cat /tmp/tmp.ePR46WPK0d + rm /tmp/tmp.ZB2Zytxgar /tmp/tmp.ePR46WPK0d + return 0 2026-03-26T09:35:01.027Z INFO setup Manager starting up {"gitCommit": "ee94435304adb1efd9e68c7306fd5cf40176592e", "gitBranch": "PR-2287-ee944353", "buildTime": "", "goVersion": "go1.25.8", "os": "linux", "arch": "amd64"} + create_namespace monitoring-2-0-17023 + local namespace=monitoring-2-0-17023 + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + destroy_chaos_mesh ++ helm list --all-namespaces --filter chaos-mesh ++ tail -n1 ++ awk '-F ' '{print $2}' ++ sed s/NAMESPACE// + local chaos_mesh_ns= + desc 'destroy chaos-mesh' + set +o xtrace ----------------------------------------------------------------------------------- destroy chaos-mesh ----------------------------------------------------------------------------------- + '[' -n '' ']' ++ kubectl get MutatingWebhookConfiguration ++ grep chaos-mesh ++ awk 
'{print $1}' + timeout 30 kubectl delete MutatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ awk '{print $1}' ++ kubectl get ValidatingWebhookConfiguration ++ grep chaos-mesh + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep validate-auth ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl api-resources ++ grep chaos-mesh ++ awk '{print $1}' ++ kubectl get crd ++ grep chaos-mesh.org ++ awk '{print $1}' + timeout 30 kubectl delete crd error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrolebinding ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrolebinding error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrole ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrole error: resource(s) were provided, but no name was specified + : + desc 'cleaned up all old namespaces' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up all old namespaces ----------------------------------------------------------------------------------- + kubectl_bin get ns ++ mktemp + grep -E -v '^kube-|^default|Terminating|psmdb-operator|openshift|^gke-|^gmp-|^NAME' + awk '{print$1}' + '[' -n '' ']' + desc 'cleaned up old namespaces monitoring-2-0-17023' + set +o xtrace + xargs kubectl delete ns ----------------------------------------------------------------------------------- cleaned up old namespaces monitoring-2-0-17023 ----------------------------------------------------------------------------------- + kubectl_bin delete namespace monitoring-2-0-17023 --ignore-not-found + local LAST_OUT=/tmp/tmp.eA6MrBxA9I ++ mktemp ++ mktemp + local LAST_ERR=/tmp/tmp.gpKZf6KeLd + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get ns + local LAST_OUT=/tmp/tmp.jQjXYfWTi7 ++ mktemp + local LAST_ERR=/tmp/tmp.S1lwN6Etcr + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete namespace monitoring-2-0-17023 --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.eA6MrBxA9I + cat /tmp/tmp.gpKZf6KeLd + rm /tmp/tmp.eA6MrBxA9I /tmp/tmp.gpKZf6KeLd + return 0 error: resource(s) were provided, but no name was specified + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.jQjXYfWTi7 + cat /tmp/tmp.S1lwN6Etcr + rm /tmp/tmp.jQjXYfWTi7 /tmp/tmp.S1lwN6Etcr + return 0 + kubectl_bin wait --for=delete namespace monitoring-2-0-17023 ++ mktemp + local LAST_OUT=/tmp/tmp.iNYPVfYK0f ++ mktemp + local LAST_ERR=/tmp/tmp.7NJe18J0Tr + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete namespace monitoring-2-0-17023 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.iNYPVfYK0f + cat /tmp/tmp.7NJe18J0Tr + rm /tmp/tmp.iNYPVfYK0f /tmp/tmp.7NJe18J0Tr + return 0 + desc 'create namespace monitoring-2-0-17023' + set +o xtrace ----------------------------------------------------------------------------------- create namespace monitoring-2-0-17023 ----------------------------------------------------------------------------------- + kubectl_bin create namespace monitoring-2-0-17023 ++ mktemp + local 
LAST_OUT=/tmp/tmp.7IJQfBMCIY ++ mktemp + local LAST_ERR=/tmp/tmp.wJ47UT0fNm + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl create namespace monitoring-2-0-17023 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.7IJQfBMCIY namespace/monitoring-2-0-17023 created + cat /tmp/tmp.wJ47UT0fNm + rm /tmp/tmp.7IJQfBMCIY /tmp/tmp.wJ47UT0fNm + return 0 + set_kube_ctx monitoring-2-0-17023 + local namespace=monitoring-2-0-17023 ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.a3rY4uQmUX +++ mktemp ++ local LAST_ERR=/tmp/tmp.jmISloz2GS ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.a3rY4uQmUX ++ cat /tmp/tmp.jmISloz2GS ++ rm /tmp/tmp.a3rY4uQmUX /tmp/tmp.jmISloz2GS ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-2287-ee944353-4-cluster13 --namespace=monitoring-2-0-17023 ++ mktemp + local LAST_OUT=/tmp/tmp.Me3dkuJjkw ++ mktemp + local LAST_ERR=/tmp/tmp.x4zvmXvaWo + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-2287-ee944353-4-cluster13 --namespace=monitoring-2-0-17023 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Me3dkuJjkw Context "gke_cloud-dev-112233_us-central1-a_jen-psmdb-2287-ee944353-4-cluster13" modified. + cat /tmp/tmp.x4zvmXvaWo + rm /tmp/tmp.Me3dkuJjkw /tmp/tmp.x4zvmXvaWo + return 0 + deploy_cert_manager + desc 'deploy cert manager' + set +o xtrace ----------------------------------------------------------------------------------- deploy cert manager ----------------------------------------------------------------------------------- + kubectl_bin create namespace cert-manager ++ mktemp + local LAST_OUT=/tmp/tmp.VFxE5jyxPF ++ mktemp + local LAST_ERR=/tmp/tmp.WgNZi6YT2O + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl create namespace cert-manager + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.VFxE5jyxPF namespace/cert-manager created + cat /tmp/tmp.WgNZi6YT2O + rm /tmp/tmp.VFxE5jyxPF /tmp/tmp.WgNZi6YT2O + return 0 + kubectl_bin label namespace cert-manager certmanager.k8s.io/disable-validation=true ++ mktemp + local LAST_OUT=/tmp/tmp.c4nE2UlNpQ ++ mktemp + local LAST_ERR=/tmp/tmp.W5KgfBxJn2 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl label namespace cert-manager certmanager.k8s.io/disable-validation=true + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.c4nE2UlNpQ namespace/cert-manager labeled + cat /tmp/tmp.W5KgfBxJn2 + rm /tmp/tmp.c4nE2UlNpQ /tmp/tmp.W5KgfBxJn2 + return 0 + kubectl_bin apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml --validate=false ++ mktemp + local LAST_OUT=/tmp/tmp.QDumdiVJSz ++ mktemp + local LAST_ERR=/tmp/tmp.aYPUGO2Pzx + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml --validate=false + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.QDumdiVJSz namespace/cert-manager configured customresourcedefinition.apiextensions.k8s.io/challenges.acme.cert-manager.io unchanged 
customresourcedefinition.apiextensions.k8s.io/orders.acme.cert-manager.io unchanged customresourcedefinition.apiextensions.k8s.io/certificaterequests.cert-manager.io unchanged customresourcedefinition.apiextensions.k8s.io/certificates.cert-manager.io unchanged customresourcedefinition.apiextensions.k8s.io/clusterissuers.cert-manager.io unchanged customresourcedefinition.apiextensions.k8s.io/issuers.cert-manager.io unchanged serviceaccount/cert-manager-cainjector created serviceaccount/cert-manager created serviceaccount/cert-manager-webhook created clusterrole.rbac.authorization.k8s.io/cert-manager-cainjector unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-issuers unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-clusterissuers unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-certificates unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-orders unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-challenges unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-ingress-shim unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-cluster-view unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-view unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-edit unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-approve:cert-manager-io unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-certificatesigningrequests unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-webhook:subjectaccessreviews unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-cainjector unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-issuers unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-clusterissuers unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-certificates unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-orders unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-challenges unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-ingress-shim unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-approve:cert-manager-io unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-certificatesigningrequests unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-webhook:subjectaccessreviews unchanged role.rbac.authorization.k8s.io/cert-manager-cainjector:leaderelection unchanged role.rbac.authorization.k8s.io/cert-manager:leaderelection unchanged role.rbac.authorization.k8s.io/cert-manager-tokenrequest created role.rbac.authorization.k8s.io/cert-manager-webhook:dynamic-serving created rolebinding.rbac.authorization.k8s.io/cert-manager-cainjector:leaderelection unchanged rolebinding.rbac.authorization.k8s.io/cert-manager:leaderelection unchanged rolebinding.rbac.authorization.k8s.io/cert-manager-tokenrequest created rolebinding.rbac.authorization.k8s.io/cert-manager-webhook:dynamic-serving created service/cert-manager-cainjector created service/cert-manager created service/cert-manager-webhook created deployment.apps/cert-manager-cainjector created deployment.apps/cert-manager created deployment.apps/cert-manager-webhook created mutatingwebhookconfiguration.admissionregistration.k8s.io/cert-manager-webhook configured 
validatingwebhookconfiguration.admissionregistration.k8s.io/cert-manager-webhook configured + cat /tmp/tmp.aYPUGO2Pzx Warning: resource namespaces/cert-manager is missing the kubectl.kubernetes.io/last-applied-configuration annotation which is required by kubectl apply. kubectl apply should only be used on resources created declaratively by either kubectl create --save-config or kubectl apply. The missing annotation will be patched automatically. + rm /tmp/tmp.QDumdiVJSz /tmp/tmp.aYPUGO2Pzx + return 0 + kubectl_bin -n cert-manager wait pod -l app.kubernetes.io/instance=cert-manager --for=condition=ready ++ mktemp + local LAST_OUT=/tmp/tmp.dgdtP87kUm ++ mktemp + local LAST_ERR=/tmp/tmp.twKxbp1iME + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl -n cert-manager wait pod -l app.kubernetes.io/instance=cert-manager --for=condition=ready + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.dgdtP87kUm pod/cert-manager-559d798845-bmljk condition met pod/cert-manager-cainjector-64958d9c7c-kpcl7 condition met pod/cert-manager-webhook-7fb6f99b56-75ccg condition met + cat /tmp/tmp.twKxbp1iME + rm /tmp/tmp.dgdtP87kUm /tmp/tmp.twKxbp1iME + return 0 + sleep 120 + desc 'install PMM Server' + set +o xtrace ----------------------------------------------------------------------------------- install PMM Server ----------------------------------------------------------------------------------- + deploy_pmm_server + helm uninstall monitoring Error: uninstall: Release not loaded: monitoring: release: not found + : + helm repo remove stable "stable" has been removed from your repositories + helm repo add stable https://charts.helm.sh/stable "stable" has been added to your repositories + [[ -n '' ]] + retry 10 60 helm install monitoring --set imageTag=dev-latest --set imageRepo=docker.io/perconalab/pmm-server https://percona-charts.storage.googleapis.com/pmm-server-9.9.9.tgz + local max=10 + local delay=60 + shift 2 + local n=1 + helm install monitoring --set imageTag=dev-latest --set imageRepo=docker.io/perconalab/pmm-server https://percona-charts.storage.googleapis.com/pmm-server-9.9.9.tgz NAME: monitoring LAST DEPLOYED: Thu Mar 26 09:38:06 2026 NAMESPACE: monitoring-2-0-17023 STATUS: deployed REVISION: 1 TEST SUITE: None NOTES: PMM server can be accessed via HTTPS (port 443) on the following DNS name from within your cluster: endpoint: https://monitoring-service.monitoring-2-0-17023.svc.cluster.local:443 login: admin password: admin + sleep 40 + kubectl_bin exec monitoring-0 -- bash -c 'ls -l /proc/*/exe 2>/dev/null| grep postgres >/dev/null' ++ mktemp + local LAST_OUT=/tmp/tmp.asYlcDn1u8 ++ mktemp + local LAST_ERR=/tmp/tmp.Uy5uHFfoOW + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec monitoring-0 -- bash -c 'ls -l /proc/*/exe 2>/dev/null| grep postgres >/dev/null' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.asYlcDn1u8 + cat /tmp/tmp.Uy5uHFfoOW + rm /tmp/tmp.asYlcDn1u8 /tmp/tmp.Uy5uHFfoOW + return 0 + cluster=monitoring + desc 'create secrets and start client' + set +o xtrace ----------------------------------------------------------------------------------- create secrets and start client ----------------------------------------------------------------------------------- + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2287/e2e-tests/conf/secrets.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2287/e2e-tests/monitoring-2-0/conf/secrets.yml ++ 
mktemp + local LAST_OUT=/tmp/tmp.n2L4vyRigx ++ mktemp + local LAST_ERR=/tmp/tmp.bZa2ww6mv3 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2287/e2e-tests/conf/secrets.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2287/e2e-tests/monitoring-2-0/conf/secrets.yml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.n2L4vyRigx secret/some-users created secret/some-users unchanged + cat /tmp/tmp.bZa2ww6mv3 + rm /tmp/tmp.n2L4vyRigx /tmp/tmp.bZa2ww6mv3 + return 0 + yq '.spec.template.spec.volumes[0].secret.secretName="monitoring-ssl"' /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2287/e2e-tests/conf/client_with_tls.yml + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.jj1MCfFEVs ++ mktemp + local LAST_ERR=/tmp/tmp.cRgfPmVHba + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.jj1MCfFEVs deployment.apps/psmdb-client created + cat /tmp/tmp.cRgfPmVHba + rm /tmp/tmp.jj1MCfFEVs /tmp/tmp.cRgfPmVHba + return 0 + sleep 90 + desc 'create first PSMDB cluster monitoring' + set +o xtrace ----------------------------------------------------------------------------------- create first PSMDB cluster monitoring ----------------------------------------------------------------------------------- + apply_cluster /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2287/e2e-tests/monitoring-2-0/conf/monitoring-rs0.yml + '[' -z '' ']' + cat_config /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2287/e2e-tests/monitoring-2-0/conf/monitoring-rs0.yml + kubectl_bin apply -f - + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2287/e2e-tests/monitoring-2-0/conf/monitoring-rs0.yml + yq eval '(.spec | select(has("initImage"))).initImage = "docker.io/perconalab/percona-server-mongodb-operator:PR-2287-ee944353"' + yq eval '(.spec | select(has("pmm"))).pmm.image = "docker.io/percona/pmm-client:2.44.1-1"' ++ mktemp + yq eval '(.spec | select(.image == null)).image = "docker.io/perconalab/percona-server-mongodb-operator:main-mongod8.0"' + /usr/sbin/sed -e s/NAME_SPACE/monitoring-2-0-17023/g + yq eval '(.spec | select(has("backup"))).backup.image = "docker.io/perconalab/percona-server-mongodb-operator:main-backup"' + yq eval '.spec.upgradeOptions.apply="Never"' + local LAST_OUT=/tmp/tmp.5HH4Uy0dpR ++ mktemp + local LAST_ERR=/tmp/tmp.wLT4FVDfZP + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.5HH4Uy0dpR perconaservermongodb.psmdb.percona.com/monitoring created + cat /tmp/tmp.wLT4FVDfZP + rm /tmp/tmp.5HH4Uy0dpR /tmp/tmp.wLT4FVDfZP + return 0 + wait_for_running monitoring-rs0 3 + local name=monitoring-rs0 + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=monitoring ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod monitoring-rs0-0 + local pod=monitoring-rs0-0 + set +o xtrace waiting for pod/monitoring-rs0-0 to be ready.............OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod monitoring-rs0-1 + local pod=monitoring-rs0-1 + set +o xtrace waiting for pod/monitoring-rs0-1 to be ready.............OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb monitoring -o 
'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.oLPnoG7rSh +++ mktemp ++ local LAST_ERR=/tmp/tmp.4FSk016uqy ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb monitoring -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.oLPnoG7rSh ++ cat /tmp/tmp.4FSk016uqy ++ rm /tmp/tmp.oLPnoG7rSh /tmp/tmp.4FSk016uqy ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod monitoring-rs0-2 + local pod=monitoring-rs0-2 + set +o xtrace waiting for pod/monitoring-rs0-2 to be ready.............OK ++ kubectl_bin get psmdb monitoring -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Yyqlg4FjwD +++ mktemp ++ local LAST_ERR=/tmp/tmp.2hLAt8A77C ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb monitoring -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Yyqlg4FjwD ++ cat /tmp/tmp.2hLAt8A77C ++ rm /tmp/tmp.Yyqlg4FjwD /tmp/tmp.2hLAt8A77C ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb monitoring -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.acvUfebWgr +++ mktemp ++ local LAST_ERR=/tmp/tmp.6OXxzHYwUg ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb monitoring -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.acvUfebWgr ++ cat /tmp/tmp.6OXxzHYwUg ++ rm /tmp/tmp.acvUfebWgr /tmp/tmp.6OXxzHYwUg ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness.......................... + desc 'check if pmm-client container is not enabled' + set +o xtrace ----------------------------------------------------------------------------------- check if pmm-client container is not enabled ----------------------------------------------------------------------------------- + compare_kubectl statefulset/monitoring-rs0 -no-pmm + local resource=statefulset/monitoring-rs0 + local postfix=-no-pmm + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2287/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-rs0-no-pmm.yml + local new_result=/tmp/tmp.Nkr2LYGrdi/statefulset_monitoring-rs0.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2287/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-rs0-no-pmm-oc.yml ']' + kubectl_bin get -o yaml statefulset/monitoring-rs0 + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. 
| select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("monitoring-2-0-17023", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. == "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.mViGwvPY6T ++ mktemp + local LAST_ERR=/tmp/tmp.4sGlbse7me + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get -o yaml statefulset/monitoring-rs0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.mViGwvPY6T + cat /tmp/tmp.4sGlbse7me + rm /tmp/tmp.mViGwvPY6T /tmp/tmp.4sGlbse7me + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.Nkr2LYGrdi/statefulset_monitoring-rs0.yml + version_gt 1.22 ++ echo '1.32 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.Nkr2LYGrdi/statefulset_monitoring-rs0.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.Nkr2LYGrdi/statefulset_monitoring-rs0.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2287/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-rs0-no-pmm.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2287/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-rs0-no-pmm.yml /tmp/tmp.Nkr2LYGrdi/statefulset_monitoring-rs0.yml + log 'compare_kubectl: statefulset/monitoring-rs0 OK' + set +o xtrace [2026-03-26T09:42:56+0000] compare_kubectl: statefulset/monitoring-rs0 OK + sleep 10 + custom_port=27019 + run_mongos 'db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})' userAdmin:userAdmin123456@monitoring-mongos.monitoring-2-0-17023 '' '' '--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' 27019 + local 'command=db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})' + local uri=userAdmin:userAdmin123456@monitoring-mongos.monitoring-2-0-17023 + local driver=mongodb + local suffix=.svc.cluster.local + local 'mongo_flag=--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' + local port=27019 + local mongo_bin=mongo ++ echo .svc.cluster.local ++ awk -F: '{print $2}' + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27019 ++ kubectl_bin 
get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.zdArGume1C +++ mktemp ++ local LAST_ERR=/tmp/tmp.oABmOdKYmH ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.zdArGume1C ++ cat /tmp/tmp.oABmOdKYmH ++ rm /tmp/tmp.zdArGume1C /tmp/tmp.oABmOdKYmH ++ return 0 + local client_container=psmdb-client-699f458f75-zpjhc + kubectl_bin exec psmdb-client-699f458f75-zpjhc -- bash -c 'printf '\''db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})\n'\'' | mongo mongodb://userAdmin:userAdmin123456@monitoring-mongos.monitoring-2-0-17023.svc.cluster.local:27019/admin --tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' ++ mktemp + local LAST_OUT=/tmp/tmp.lmxeABq3ly ++ mktemp + local LAST_ERR=/tmp/tmp.pvxEmeKrKq + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-699f458f75-zpjhc -- bash -c 'printf '\''db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})\n'\'' | mongo mongodb://userAdmin:userAdmin123456@monitoring-mongos.monitoring-2-0-17023.svc.cluster.local:27019/admin --tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.lmxeABq3ly Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://monitoring-mongos.monitoring-2-0-17023.svc.cluster.local:27019/admin?compressors=disabled&gssapiServiceName=mongodb {"t":{"$date":"2026-03-26T09:43:08.585Z"},"s":"I", "c":"NETWORK", "id":5490002, "ctx":"thread1","msg":"Started a new thread for the timer service"} Implicit session: session { "id" : UUID("c1fe3d72-8811-40b7-add1-c7460a983f99") } Percona Server for MongoDB server version: v8.0.19-7 WARNING: shell and server versions do not match Successfully added user: { "user" : "myApp", "roles" : [ { "db" : "myApp", "role" : "readWrite" } ] } bye + cat /tmp/tmp.pvxEmeKrKq + rm /tmp/tmp.lmxeABq3ly /tmp/tmp.pvxEmeKrKq + return 0 + run_mongos 'sh.enableSharding("myApp")' clusterAdmin:clusterAdmin123456@monitoring-mongos.monitoring-2-0-17023 '' '' '--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' 27019 + local 'command=sh.enableSharding("myApp")' + local uri=clusterAdmin:clusterAdmin123456@monitoring-mongos.monitoring-2-0-17023 + local driver=mongodb + local suffix=.svc.cluster.local + local 'mongo_flag=--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' + local port=27019 + local mongo_bin=mongo ++ echo .svc.cluster.local ++ awk -F: '{print $2}' + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27019 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.eSMrSJR7k4 +++ mktemp ++ local LAST_ERR=/tmp/tmp.rKDWjtzrRc ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.eSMrSJR7k4 ++ cat /tmp/tmp.rKDWjtzrRc ++ rm /tmp/tmp.eSMrSJR7k4 /tmp/tmp.rKDWjtzrRc ++ return 0 + local client_container=psmdb-client-699f458f75-zpjhc + kubectl_bin exec psmdb-client-699f458f75-zpjhc -- 
bash -c 'printf '\''sh.enableSharding("myApp")\n'\'' | mongo mongodb://clusterAdmin:clusterAdmin123456@monitoring-mongos.monitoring-2-0-17023.svc.cluster.local:27019/admin --tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' ++ mktemp + local LAST_OUT=/tmp/tmp.vvicUxHHXW ++ mktemp + local LAST_ERR=/tmp/tmp.Nj0GsAyjZq + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-699f458f75-zpjhc -- bash -c 'printf '\''sh.enableSharding("myApp")\n'\'' | mongo mongodb://clusterAdmin:clusterAdmin123456@monitoring-mongos.monitoring-2-0-17023.svc.cluster.local:27019/admin --tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.vvicUxHHXW Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://monitoring-mongos.monitoring-2-0-17023.svc.cluster.local:27019/admin?compressors=disabled&gssapiServiceName=mongodb {"t":{"$date":"2026-03-26T09:43:10.791Z"},"s":"I", "c":"NETWORK", "id":5490002, "ctx":"thread1","msg":"Started a new thread for the timer service"} Implicit session: session { "id" : UUID("fe468f82-6d95-422d-97f5-5f5d7a38f6bb") } Percona Server for MongoDB server version: v8.0.19-7 WARNING: shell and server versions do not match { "ok" : 1, "$clusterTime" : { "clusterTime" : Timestamp(1774518190, 9), "signature" : { "hash" : BinData(0,"bgLTIYE7+OAz/n/MeIsKKcDFMMA="), "keyId" : NumberLong("7621497205660057601") } }, "operationTime" : Timestamp(1774518190, 6) } bye + cat /tmp/tmp.Nj0GsAyjZq + rm /tmp/tmp.vvicUxHHXW /tmp/tmp.Nj0GsAyjZq + return 0 + insert_data_mongos 100500 myApp '--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' 27019 + local data=100500 + local db_name=myApp + local 'flags=--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' + local port=27019 + run_mongos 'use myApp\n db.test.insert({ x: 100500 })' myApp:myPass@monitoring-mongos.monitoring-2-0-17023 '' '' '--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' 27019 + local 'command=use myApp\n db.test.insert({ x: 100500 })' + local uri=myApp:myPass@monitoring-mongos.monitoring-2-0-17023 + local driver=mongodb + local suffix=.svc.cluster.local + local 'mongo_flag=--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' + local port=27019 + local mongo_bin=mongo ++ echo .svc.cluster.local ++ awk -F: '{print $2}' + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27019 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.OEksFqQOZA +++ mktemp ++ local LAST_ERR=/tmp/tmp.hVcEuJDZDe ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.OEksFqQOZA ++ cat /tmp/tmp.hVcEuJDZDe ++ rm /tmp/tmp.OEksFqQOZA /tmp/tmp.hVcEuJDZDe ++ return 0 + local client_container=psmdb-client-699f458f75-zpjhc + kubectl_bin exec psmdb-client-699f458f75-zpjhc -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb://myApp:myPass@monitoring-mongos.monitoring-2-0-17023.svc.cluster.local:27019/admin --tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' ++ mktemp + local LAST_OUT=/tmp/tmp.RR4UPNR094 ++ mktemp + local 
LAST_ERR=/tmp/tmp.3lcVB6Pw7U + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-699f458f75-zpjhc -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb://myApp:myPass@monitoring-mongos.monitoring-2-0-17023.svc.cluster.local:27019/admin --tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.RR4UPNR094 Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://monitoring-mongos.monitoring-2-0-17023.svc.cluster.local:27019/admin?compressors=disabled&gssapiServiceName=mongodb {"t":{"$date":"2026-03-26T09:43:12.976Z"},"s":"I", "c":"NETWORK", "id":5490002, "ctx":"thread1","msg":"Started a new thread for the timer service"} Implicit session: session { "id" : UUID("b0142bdd-fae8-45e6-9d03-9bd210f9d0b3") } Percona Server for MongoDB server version: v8.0.19-7 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.3lcVB6Pw7U + rm /tmp/tmp.RR4UPNR094 /tmp/tmp.3lcVB6Pw7U + return 0 + insert_data_mongos 100600 myApp '--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' 27019 + local data=100600 + local db_name=myApp + local 'flags=--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' + local port=27019 + run_mongos 'use myApp\n db.test.insert({ x: 100600 })' myApp:myPass@monitoring-mongos.monitoring-2-0-17023 '' '' '--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' 27019 + local 'command=use myApp\n db.test.insert({ x: 100600 })' + local uri=myApp:myPass@monitoring-mongos.monitoring-2-0-17023 + local driver=mongodb + local suffix=.svc.cluster.local + local 'mongo_flag=--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' + local port=27019 + local mongo_bin=mongo ++ echo .svc.cluster.local ++ awk -F: '{print $2}' + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27019 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.DMymWVYMq9 +++ mktemp ++ local LAST_ERR=/tmp/tmp.08imHZVd1n ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.DMymWVYMq9 ++ cat /tmp/tmp.08imHZVd1n ++ rm /tmp/tmp.DMymWVYMq9 /tmp/tmp.08imHZVd1n ++ return 0 + local client_container=psmdb-client-699f458f75-zpjhc + kubectl_bin exec psmdb-client-699f458f75-zpjhc -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100600 })\n'\'' | mongo mongodb://myApp:myPass@monitoring-mongos.monitoring-2-0-17023.svc.cluster.local:27019/admin --tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' ++ mktemp + local LAST_OUT=/tmp/tmp.v4Z2cRTh00 ++ mktemp + local LAST_ERR=/tmp/tmp.XmYsulRCs3 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-699f458f75-zpjhc -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100600 })\n'\'' | mongo mongodb://myApp:myPass@monitoring-mongos.monitoring-2-0-17023.svc.cluster.local:27019/admin --tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.v4Z2cRTh00 Percona Server for MongoDB 
shell version v4.4.29-28 connecting to: mongodb://monitoring-mongos.monitoring-2-0-17023.svc.cluster.local:27019/admin?compressors=disabled&gssapiServiceName=mongodb {"t":{"$date":"2026-03-26T09:43:15.867Z"},"s":"I", "c":"NETWORK", "id":5490002, "ctx":"thread1","msg":"Started a new thread for the timer service"} Implicit session: session { "id" : UUID("004d4a5f-2cf7-4f65-8dcb-3e707fb9b06e") } Percona Server for MongoDB server version: v8.0.19-7 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.XmYsulRCs3 + rm /tmp/tmp.v4Z2cRTh00 /tmp/tmp.XmYsulRCs3 + return 0 + insert_data_mongos 100700 myApp '--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' 27019 + local data=100700 + local db_name=myApp + local 'flags=--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' + local port=27019 + run_mongos 'use myApp\n db.test.insert({ x: 100700 })' myApp:myPass@monitoring-mongos.monitoring-2-0-17023 '' '' '--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' 27019 + local 'command=use myApp\n db.test.insert({ x: 100700 })' + local uri=myApp:myPass@monitoring-mongos.monitoring-2-0-17023 + local driver=mongodb + local suffix=.svc.cluster.local + local 'mongo_flag=--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' + local port=27019 + local mongo_bin=mongo ++ echo .svc.cluster.local ++ awk -F: '{print $2}' + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27019 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.r2KjxnRn1P +++ mktemp ++ local LAST_ERR=/tmp/tmp.tMEnYapEdG ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.r2KjxnRn1P ++ cat /tmp/tmp.tMEnYapEdG ++ rm /tmp/tmp.r2KjxnRn1P /tmp/tmp.tMEnYapEdG ++ return 0 + local client_container=psmdb-client-699f458f75-zpjhc + kubectl_bin exec psmdb-client-699f458f75-zpjhc -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100700 })\n'\'' | mongo mongodb://myApp:myPass@monitoring-mongos.monitoring-2-0-17023.svc.cluster.local:27019/admin --tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' ++ mktemp + local LAST_OUT=/tmp/tmp.il4tRp1C6W ++ mktemp + local LAST_ERR=/tmp/tmp.rZTgicZJDh + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-699f458f75-zpjhc -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100700 })\n'\'' | mongo mongodb://myApp:myPass@monitoring-mongos.monitoring-2-0-17023.svc.cluster.local:27019/admin --tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.il4tRp1C6W Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://monitoring-mongos.monitoring-2-0-17023.svc.cluster.local:27019/admin?compressors=disabled&gssapiServiceName=mongodb {"t":{"$date":"2026-03-26T09:43:17.911Z"},"s":"I", "c":"NETWORK", "id":5490002, "ctx":"thread1","msg":"Started a new thread for the timer service"} Implicit session: session { "id" : UUID("fe1b8606-5221-44d2-8553-c5bb9a34061c") } Percona Server for MongoDB server version: v8.0.19-7 WARNING: shell and server versions do not match switched to db myApp 
+ desc 'add PMM_SERVER_API_KEY for secret some-users'
+ set +o xtrace
-----------------------------------------------------------------------------------
add PMM_SERVER_API_KEY for secret some-users
-----------------------------------------------------------------------------------
++ jq .key
+++ get_service_endpoint monitoring-service
+++ local service=monitoring-service
++++ kubectl_bin get service/monitoring-service -o json
++++ jq '.status.loadBalancer.ingress[].hostname'
++++ sed -e 's/^"//; s/"$//;'
+++++ mktemp
++++ local LAST_OUT=/tmp/tmp.HurEdtUSbc
+++++ mktemp
++++ local LAST_ERR=/tmp/tmp.rChN78x6z6
++++ local exit_status=0
++++ local timeout=4
+++++ seq 0 2
++++ for i in $(seq 0 2)
++++ set +e
++++ kubectl get service/monitoring-service -o json
++++ exit_status=0
++++ set -e
++++ '[' 0 '!=' 0 -a -n 1 ']'
++++ break
++++ cat /tmp/tmp.HurEdtUSbc
++++ cat /tmp/tmp.rChN78x6z6
++++ rm /tmp/tmp.HurEdtUSbc /tmp/tmp.rChN78x6z6
++++ return 0
+++ local hostname=null
+++ '[' -n null -a null '!=' null ']'
++++ kubectl_bin get service/monitoring-service -o json
++++ jq '.status.loadBalancer.ingress[].ip'
++++ sed -e 's/^"//; s/"$//;'
+++++ mktemp
++++ local LAST_OUT=/tmp/tmp.YLvUnGY0sP
+++++ mktemp
++++ local LAST_ERR=/tmp/tmp.bUUbxq7UEl
++++ local exit_status=0
++++ local timeout=4
+++++ seq 0 2
++++ for i in $(seq 0 2)
++++ set +e
++++ kubectl get service/monitoring-service -o json
++++ exit_status=0
++++ set -e
++++ '[' 0 '!=' 0 -a -n 1 ']'
++++ break
++++ cat /tmp/tmp.YLvUnGY0sP
++++ cat /tmp/tmp.bUUbxq7UEl
++++ rm /tmp/tmp.YLvUnGY0sP /tmp/tmp.bUUbxq7UEl
++++ return 0
+++ local ip=34.27.251.120
+++ '[' -n 34.27.251.120 -a 34.27.251.120 '!=' null ']'
+++ echo 34.27.251.120
+++ return
++ curl --insecure -X POST -H 'Content-Type: application/json' -d '{"name":"operator", "role": "Admin"}' https://admin:admin@34.27.251.120/graph/api/auth/keys
  % Total    % Received % Xferd  Average Speed   Time    Time     Time  Current
                                 Dload  Upload   Total   Spent    Left  Speed
  0     0    0     0    0     0      0      0 --:--:-- --:--:-- --:--:--     0
100   155  100   119  100    36    296     89 --:--:-- --:--:-- --:--:--   386
+ API_KEY='"eyJrIjoiUHZUVzU3TUk5RUU0b0hEMmVicWxJWnhDN0hObkpQOG4iLCJuIjoib3BlcmF0b3IiLCJpZCI6MX0="'
+ kubectl_bin patch secret some-users --type merge --patch '{"stringData": {"PMM_SERVER_API_KEY": "eyJrIjoiUHZUVzU3TUk5RUU0b0hEMmVicWxJWnhDN0hObkpQOG4iLCJuIjoib3BlcmF0b3IiLCJpZCI6MX0="}}'
++ mktemp
+ local LAST_OUT=/tmp/tmp.gF9W1AiGGN
++ mktemp
+ local LAST_ERR=/tmp/tmp.q6PsXDxGhN
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ kubectl patch secret some-users --type merge --patch '{"stringData": {"PMM_SERVER_API_KEY": "eyJrIjoiUHZUVzU3TUk5RUU0b0hEMmVicWxJWnhDN0hObkpQOG4iLCJuIjoib3BlcmF0b3IiLCJpZCI6MX0="}}'
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.gF9W1AiGGN
secret/some-users patched
+ cat /tmp/tmp.q6PsXDxGhN
+ rm /tmp/tmp.gF9W1AiGGN /tmp/tmp.q6PsXDxGhN
+ return 0
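Two things just happened: a Grafana API key named operator with the Admin role was minted through the PMM server's /graph/api/auth/keys endpoint, and the key was merged into the some-users Secret under PMM_SERVER_API_KEY so the operator can hand it to pmm-client. The same flow as standalone commands (endpoint and admin:admin credentials as seen in this run; jq -r replaces the trace's jq .key to strip the quotes up front):

endpoint=34.27.251.120   # LoadBalancer IP of service/monitoring-service in this run
api_key=$(curl --insecure -s -X POST -H 'Content-Type: application/json' \
    -d '{"name":"operator", "role": "Admin"}' \
    "https://admin:admin@${endpoint}/graph/api/auth/keys" | jq -r .key)
kubectl patch secret some-users --type merge \
    --patch "{\"stringData\": {\"PMM_SERVER_API_KEY\": \"${api_key}\"}}"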
+ desc 'check if all 3 Pods started'
+ set +o xtrace
-----------------------------------------------------------------------------------
check if all 3 Pods started
-----------------------------------------------------------------------------------
+ wait_for_running monitoring-rs0 3
+ local name=monitoring-rs0
+ let last_pod=2
+ local check_cluster_readyness=true
+ set_debug
+ [[ 1 == 1 ]]
+ set -o xtrace
+ local rs_name=rs0
+ local cluster_name=monitoring
++ seq 0 2
+ for i in $(seq 0 $last_pod)
+ [[ 0 -eq 2 ]]
+ wait_pod monitoring-rs0-0
+ local pod=monitoring-rs0-0
+ set +o xtrace
waiting for pod/monitoring-rs0-0 to be ready.OK
+ for i in $(seq 0 $last_pod)
+ [[ 1 -eq 2 ]]
+ wait_pod monitoring-rs0-1
+ local pod=monitoring-rs0-1
+ set +o xtrace
waiting for pod/monitoring-rs0-1 to be ready.OK
+ for i in $(seq 0 $last_pod)
+ [[ 2 -eq 2 ]]
++ kubectl_bin get psmdb monitoring -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}'
+++ mktemp
++ local LAST_OUT=/tmp/tmp.vonDAz5JTV
+++ mktemp
++ local LAST_ERR=/tmp/tmp.2pQ8j4xX0d
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in $(seq 0 2)
++ set +e
++ kubectl get psmdb monitoring -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}'
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 -a -n 1 ']'
++ break
++ cat /tmp/tmp.vonDAz5JTV
++ cat /tmp/tmp.2pQ8j4xX0d
++ rm /tmp/tmp.vonDAz5JTV /tmp/tmp.2pQ8j4xX0d
++ return 0
+ [[ '' == \t\r\u\e ]]
+ wait_pod monitoring-rs0-2
+ local pod=monitoring-rs0-2
+ set +o xtrace
waiting for pod/monitoring-rs0-2 to be ready.OK
++ kubectl_bin get psmdb monitoring -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}'
+++ mktemp
++ local LAST_OUT=/tmp/tmp.Xq6eba0baY
+++ mktemp
++ local LAST_ERR=/tmp/tmp.UfAQHKn1Tl
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in $(seq 0 2)
++ set +e
++ kubectl get psmdb monitoring -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}'
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 -a -n 1 ']'
++ break
++ cat /tmp/tmp.Xq6eba0baY
++ cat /tmp/tmp.UfAQHKn1Tl
++ rm /tmp/tmp.Xq6eba0baY /tmp/tmp.UfAQHKn1Tl
++ return 0
+ [[ '' == \t\r\u\e ]]
++ kubectl_bin get psmdb monitoring -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}'
+++ mktemp
++ local LAST_OUT=/tmp/tmp.PBfAV4vcEo
+++ mktemp
++ local LAST_ERR=/tmp/tmp.t1uxJPl9Xr
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in $(seq 0 2)
++ set +e
++ kubectl get psmdb monitoring -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}'
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 -a -n 1 ']'
++ break
++ cat /tmp/tmp.PBfAV4vcEo
++ cat /tmp/tmp.t1uxJPl9Xr
++ rm /tmp/tmp.PBfAV4vcEo /tmp/tmp.t1uxJPl9Xr
++ return 0
+ [[ '' == \t\r\u\e ]]
+ sleep 10
+ [[ true == \t\r\u\e ]]
+ set +x
Waiting for cluster readyness.............................................................................................................................................................
+ sleep 90
+ desc 'check if pmm-client container enabled'
+ set +o xtrace
-----------------------------------------------------------------------------------
check if pmm-client container enabled
-----------------------------------------------------------------------------------
+ compare_kubectl statefulset/monitoring-rs0
+ local resource=statefulset/monitoring-rs0
+ local postfix=
+ local skip_generation_check=
+ local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2287/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-rs0.yml
+ local new_result=/tmp/tmp.Nkr2LYGrdi/statefulset_monitoring-rs0.yml
+ '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2287/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-rs0-oc.yml ']'
+ kubectl_bin get -o yaml statefulset/monitoring-rs0
+ yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(..
| select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("monitoring-2-0-17023", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. 
== "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.23tNRYf24c ++ mktemp + local LAST_ERR=/tmp/tmp.OTp1oENLVh + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get -o yaml statefulset/monitoring-rs0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.23tNRYf24c + cat /tmp/tmp.OTp1oENLVh + rm /tmp/tmp.23tNRYf24c /tmp/tmp.OTp1oENLVh + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.Nkr2LYGrdi/statefulset_monitoring-rs0.yml + version_gt 1.22 ++ echo '1.32 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.Nkr2LYGrdi/statefulset_monitoring-rs0.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.Nkr2LYGrdi/statefulset_monitoring-rs0.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2287/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-rs0.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2287/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-rs0.yml /tmp/tmp.Nkr2LYGrdi/statefulset_monitoring-rs0.yml + log 'compare_kubectl: statefulset/monitoring-rs0 OK' + set +o xtrace [2026-03-26T09:50:23+0000] compare_kubectl: statefulset/monitoring-rs0 OK + compare_kubectl service/monitoring-rs0 + local resource=service/monitoring-rs0 + local postfix= + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2287/e2e-tests/monitoring-2-0/compare/service_monitoring-rs0.yml + local new_result=/tmp/tmp.Nkr2LYGrdi/service_monitoring-rs0.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2287/e2e-tests/monitoring-2-0/compare/service_monitoring-rs0-oc.yml ']' + kubectl_bin get -o yaml service/monitoring-rs0 + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. 
| select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("monitoring-2-0-17023", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. == "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.OJIqJIEZwp ++ mktemp + local LAST_ERR=/tmp/tmp.uBxHU6ydnu + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get -o yaml service/monitoring-rs0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.OJIqJIEZwp + cat /tmp/tmp.uBxHU6ydnu + rm /tmp/tmp.OJIqJIEZwp /tmp/tmp.uBxHU6ydnu + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.Nkr2LYGrdi/service_monitoring-rs0.yml + version_gt 1.22 ++ echo '1.32 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.Nkr2LYGrdi/service_monitoring-rs0.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.Nkr2LYGrdi/service_monitoring-rs0.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2287/e2e-tests/monitoring-2-0/compare/service_monitoring-rs0.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2287/e2e-tests/monitoring-2-0/compare/service_monitoring-rs0.yml /tmp/tmp.Nkr2LYGrdi/service_monitoring-rs0.yml + log 'compare_kubectl: service/monitoring-rs0 OK' + set +o xtrace [2026-03-26T09:50:24+0000] compare_kubectl: service/monitoring-rs0 OK + compare_kubectl service/monitoring-mongos + local resource=service/monitoring-mongos + local postfix= + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2287/e2e-tests/monitoring-2-0/compare/service_monitoring-mongos.yml + local new_result=/tmp/tmp.Nkr2LYGrdi/service_monitoring-mongos.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2287/e2e-tests/monitoring-2-0/compare/service_monitoring-mongos-oc.yml ']' + kubectl_bin get -o yaml service/monitoring-mongos + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. 
| select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("monitoring-2-0-17023", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. == "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.RHrhOlx3g9 ++ mktemp + local LAST_ERR=/tmp/tmp.DtemozYXsu + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get -o yaml service/monitoring-mongos + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.RHrhOlx3g9 + cat /tmp/tmp.DtemozYXsu + rm /tmp/tmp.RHrhOlx3g9 /tmp/tmp.DtemozYXsu + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.Nkr2LYGrdi/service_monitoring-mongos.yml + version_gt 1.22 ++ echo '1.32 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.Nkr2LYGrdi/service_monitoring-mongos.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.Nkr2LYGrdi/service_monitoring-mongos.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2287/e2e-tests/monitoring-2-0/compare/service_monitoring-mongos.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2287/e2e-tests/monitoring-2-0/compare/service_monitoring-mongos.yml /tmp/tmp.Nkr2LYGrdi/service_monitoring-mongos.yml + log 'compare_kubectl: service/monitoring-mongos OK' + set +o xtrace [2026-03-26T09:50:25+0000] compare_kubectl: service/monitoring-mongos OK + compare_kubectl statefulset/monitoring-cfg + local resource=statefulset/monitoring-cfg + local postfix= + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2287/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-cfg.yml + local new_result=/tmp/tmp.Nkr2LYGrdi/statefulset_monitoring-cfg.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2287/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-cfg-oc.yml ']' + kubectl_bin get -o yaml statefulset/monitoring-cfg ++ mktemp + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. 
| select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("monitoring-2-0-17023", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. == "batch/v1beta1")) = "batch/v1" ' - + local LAST_OUT=/tmp/tmp.K8py8wPiQH ++ mktemp + local LAST_ERR=/tmp/tmp.sAsLsjF0OT + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get -o yaml statefulset/monitoring-cfg + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.K8py8wPiQH + cat /tmp/tmp.sAsLsjF0OT + rm /tmp/tmp.K8py8wPiQH /tmp/tmp.sAsLsjF0OT + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.Nkr2LYGrdi/statefulset_monitoring-cfg.yml + version_gt 1.22 ++ echo '1.32 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.Nkr2LYGrdi/statefulset_monitoring-cfg.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.Nkr2LYGrdi/statefulset_monitoring-cfg.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2287/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-cfg.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2287/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-cfg.yml /tmp/tmp.Nkr2LYGrdi/statefulset_monitoring-cfg.yml + log 'compare_kubectl: statefulset/monitoring-cfg OK' + set +o xtrace [2026-03-26T09:50:26+0000] compare_kubectl: statefulset/monitoring-cfg OK + compare_kubectl statefulset/monitoring-mongos + local resource=statefulset/monitoring-mongos + local postfix= + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2287/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-mongos.yml + local new_result=/tmp/tmp.Nkr2LYGrdi/statefulset_monitoring-mongos.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2287/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-mongos-oc.yml ']' + kubectl_bin get -o yaml statefulset/monitoring-mongos + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. 
| select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("monitoring-2-0-17023", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. 
== "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.QquXNJHeCm ++ mktemp + local LAST_ERR=/tmp/tmp.On0o95sTrR + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get -o yaml statefulset/monitoring-mongos + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.QquXNJHeCm + cat /tmp/tmp.On0o95sTrR + rm /tmp/tmp.QquXNJHeCm /tmp/tmp.On0o95sTrR + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.Nkr2LYGrdi/statefulset_monitoring-mongos.yml + version_gt 1.22 ++ echo '1.32 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.Nkr2LYGrdi/statefulset_monitoring-mongos.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.Nkr2LYGrdi/statefulset_monitoring-mongos.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2287/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-mongos.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2287/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-mongos.yml /tmp/tmp.Nkr2LYGrdi/statefulset_monitoring-mongos.yml + log 'compare_kubectl: statefulset/monitoring-mongos OK' + set +o xtrace [2026-03-26T09:50:27+0000] compare_kubectl: statefulset/monitoring-mongos OK + desc 'check mongod metrics' + set +o xtrace ----------------------------------------------------------------------------------- check mongod metrics ----------------------------------------------------------------------------------- + get_metric_values node_boot_time_seconds monitoring-2-0-17023-monitoring-rs0-1 admin:admin + local metric=node_boot_time_seconds + local instance=monitoring-2-0-17023-monitoring-rs0-1 + local user_pass=admin:admin ++ /usr/sbin/date -u +%s -d '-1 minute' + local start=1774518567 ++ /usr/sbin/date -u +%s + local end=1774518627 ++ get_service_endpoint monitoring-service ++ local service=monitoring-service +++ kubectl_bin get service/monitoring-service -o json +++ jq '.status.loadBalancer.ingress[].hostname' +++ sed -e 's/^"//; s/"$//;' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.qQyYq5HBiM ++++ mktemp +++ local LAST_ERR=/tmp/tmp.eduou8a6lo +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.qQyYq5HBiM +++ cat /tmp/tmp.eduou8a6lo +++ rm /tmp/tmp.qQyYq5HBiM /tmp/tmp.eduou8a6lo +++ return 0 ++ local hostname=null ++ '[' -n null -a null '!=' null ']' +++ kubectl_bin get service/monitoring-service -o json +++ jq '.status.loadBalancer.ingress[].ip' +++ sed -e 's/^"//; s/"$//;' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.prfILjIek2 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.UpVPP9kJxA +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.prfILjIek2 +++ cat /tmp/tmp.UpVPP9kJxA +++ rm /tmp/tmp.prfILjIek2 /tmp/tmp.UpVPP9kJxA +++ return 0 ++ local ip=34.27.251.120 ++ '[' -n 34.27.251.120 -a 34.27.251.120 '!=' null ']' ++ echo 34.27.251.120 ++ return + local endpoint=34.27.251.120 + curl -s -k 
+ desc 'check mongod metrics'
+ set +o xtrace
-----------------------------------------------------------------------------------
check mongod metrics
-----------------------------------------------------------------------------------
+ get_metric_values node_boot_time_seconds monitoring-2-0-17023-monitoring-rs0-1 admin:admin
+ local metric=node_boot_time_seconds
+ local instance=monitoring-2-0-17023-monitoring-rs0-1
+ local user_pass=admin:admin
++ /usr/sbin/date -u +%s -d '-1 minute'
+ local start=1774518567
++ /usr/sbin/date -u +%s
+ local end=1774518627
++ get_service_endpoint monitoring-service
++ local service=monitoring-service
+++ kubectl_bin get service/monitoring-service -o json
+++ jq '.status.loadBalancer.ingress[].hostname'
+++ sed -e 's/^"//; s/"$//;'
++++ mktemp
+++ local LAST_OUT=/tmp/tmp.qQyYq5HBiM
++++ mktemp
+++ local LAST_ERR=/tmp/tmp.eduou8a6lo
+++ local exit_status=0
+++ local timeout=4
++++ seq 0 2
+++ for i in $(seq 0 2)
+++ set +e
+++ kubectl get service/monitoring-service -o json
+++ exit_status=0
+++ set -e
+++ '[' 0 '!=' 0 -a -n 1 ']'
+++ break
+++ cat /tmp/tmp.qQyYq5HBiM
+++ cat /tmp/tmp.eduou8a6lo
+++ rm /tmp/tmp.qQyYq5HBiM /tmp/tmp.eduou8a6lo
+++ return 0
++ local hostname=null
++ '[' -n null -a null '!=' null ']'
+++ kubectl_bin get service/monitoring-service -o json
+++ jq '.status.loadBalancer.ingress[].ip'
+++ sed -e 's/^"//; s/"$//;'
++++ mktemp
+++ local LAST_OUT=/tmp/tmp.prfILjIek2
++++ mktemp
+++ local LAST_ERR=/tmp/tmp.UpVPP9kJxA
+++ local exit_status=0
+++ local timeout=4
++++ seq 0 2
+++ for i in $(seq 0 2)
+++ set +e
+++ kubectl get service/monitoring-service -o json
+++ exit_status=0
+++ set -e
+++ '[' 0 '!=' 0 -a -n 1 ']'
+++ break
+++ cat /tmp/tmp.prfILjIek2
+++ cat /tmp/tmp.UpVPP9kJxA
+++ rm /tmp/tmp.prfILjIek2 /tmp/tmp.UpVPP9kJxA
+++ return 0
++ local ip=34.27.251.120
++ '[' -n 34.27.251.120 -a 34.27.251.120 '!=' null ']'
++ echo 34.27.251.120
++ return
+ local endpoint=34.27.251.120
+ curl -s -k 'https://admin:admin@34.27.251.120/graph/api/datasources/proxy/1/api/v1/query_range?query=min%28node_boot_time_seconds%7Bnode_name%3D%7E%22monitoring-2-0-17023-monitoring-rs0-1%22%7d%20or%20node_boot_time_seconds%7Bnode_name%3D%7E%22monitoring-2-0-17023-monitoring-rs0-1%22%7D%29&start=1774518567&end=1774518627&step=60'
+ jq '.data.result[0].values[][1]'
+ grep '^"[0-9]'
"1774511709"
"1774511709"
+ get_metric_values mongodb_connections monitoring-2-0-17023-monitoring-rs0-1 admin:admin
+ local metric=mongodb_connections
+ local instance=monitoring-2-0-17023-monitoring-rs0-1
+ local user_pass=admin:admin
++ /usr/sbin/date -u +%s -d '-1 minute'
+ local start=1774518569
++ /usr/sbin/date -u +%s
+ local end=1774518629
++ get_service_endpoint monitoring-service
++ local service=monitoring-service
+++ kubectl_bin get service/monitoring-service -o json
+++ jq '.status.loadBalancer.ingress[].hostname'
+++ sed -e 's/^"//; s/"$//;'
++++ mktemp
+++ local LAST_OUT=/tmp/tmp.CoOJGt9B7E
++++ mktemp
+++ local LAST_ERR=/tmp/tmp.Pw4qrp3DCB
+++ local exit_status=0
+++ local timeout=4
++++ seq 0 2
+++ for i in $(seq 0 2)
+++ set +e
+++ kubectl get service/monitoring-service -o json
+++ exit_status=0
+++ set -e
+++ '[' 0 '!=' 0 -a -n 1 ']'
+++ break
+++ cat /tmp/tmp.CoOJGt9B7E
+++ cat /tmp/tmp.Pw4qrp3DCB
+++ rm /tmp/tmp.CoOJGt9B7E /tmp/tmp.Pw4qrp3DCB
+++ return 0
++ local hostname=null
++ '[' -n null -a null '!=' null ']'
+++ kubectl_bin get service/monitoring-service -o json
+++ jq '.status.loadBalancer.ingress[].ip'
+++ sed -e 's/^"//; s/"$//;'
++++ mktemp
+++ local LAST_OUT=/tmp/tmp.ieRyfAtlMh
++++ mktemp
+++ local LAST_ERR=/tmp/tmp.chjC2HKeJC
+++ local exit_status=0
+++ local timeout=4
++++ seq 0 2
+++ for i in $(seq 0 2)
+++ set +e
+++ kubectl get service/monitoring-service -o json
+++ exit_status=0
+++ set -e
+++ '[' 0 '!=' 0 -a -n 1 ']'
+++ break
+++ cat /tmp/tmp.ieRyfAtlMh
+++ cat /tmp/tmp.chjC2HKeJC
+++ rm /tmp/tmp.ieRyfAtlMh /tmp/tmp.chjC2HKeJC
+++ return 0
++ local ip=34.27.251.120
++ '[' -n 34.27.251.120 -a 34.27.251.120 '!=' null ']'
++ echo 34.27.251.120
++ return
+ local endpoint=34.27.251.120
+ jq '.data.result[0].values[][1]'
+ grep '^"[0-9]'
+ curl -s -k 'https://admin:admin@34.27.251.120/graph/api/datasources/proxy/1/api/v1/query_range?query=min%28mongodb_connections%7Bnode_name%3D%7E%22monitoring-2-0-17023-monitoring-rs0-1%22%7d%20or%20mongodb_connections%7Bnode_name%3D%7E%22monitoring-2-0-17023-monitoring-rs0-1%22%7D%29&start=1774518569&end=1774518629&step=60'
"0"
"0"
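get_metric_values is the positive check that PMM is actually scraping a given instance: resolve the monitoring-service LoadBalancer endpoint, run a PromQL query_range over the last minute through the Grafana datasource proxy, and require numeric samples back. Roughly (the trace pre-encodes the query into the URL; the -G/--data-urlencode formulation below is an equivalent but assumed variant):

get_metric_values() {
    local metric=$1 instance=$2 user_pass=$3
    local start end endpoint
    start=$(date -u +%s -d '-1 minute')
    end=$(date -u +%s)
    endpoint=$(get_service_endpoint monitoring-service)   # LoadBalancer hostname or IP
    curl -s -k -G "https://${user_pass}@${endpoint}/graph/api/datasources/proxy/1/api/v1/query_range" \
        --data-urlencode "query=min(${metric}{node_name=~\"${instance}\"} or ${metric}{node_name=~\"${instance}\"})" \
        --data-urlencode "start=${start}" \
        --data-urlencode "end=${end}" \
        --data-urlencode "step=60" \
        | jq '.data.result[0].values[][1]' \
        | grep '^"[0-9]'   # no numeric samples -> non-zero exit -> test failure
}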
+ desc 'check mongo config metrics'
+ set +o xtrace
-----------------------------------------------------------------------------------
check mongo config metrics
-----------------------------------------------------------------------------------
+ get_metric_values node_boot_time_seconds monitoring-2-0-17023-monitoring-cfg-1 admin:admin
+ local metric=node_boot_time_seconds
+ local instance=monitoring-2-0-17023-monitoring-cfg-1
+ local user_pass=admin:admin
++ /usr/sbin/date -u +%s -d '-1 minute'
+ local start=1774518571
++ /usr/sbin/date -u +%s
+ local end=1774518631
++ get_service_endpoint monitoring-service
++ local service=monitoring-service
+++ kubectl_bin get service/monitoring-service -o json
+++ jq '.status.loadBalancer.ingress[].hostname'
+++ sed -e 's/^"//; s/"$//;'
++++ mktemp
+++ local LAST_OUT=/tmp/tmp.BOcfox6XF3
++++ mktemp
+++ local LAST_ERR=/tmp/tmp.rH8gMbZ1AN
+++ local exit_status=0
+++ local timeout=4
++++ seq 0 2
+++ for i in $(seq 0 2)
+++ set +e
+++ kubectl get service/monitoring-service -o json
+++ exit_status=0
+++ set -e
+++ '[' 0 '!=' 0 -a -n 1 ']'
+++ break
+++ cat /tmp/tmp.BOcfox6XF3
+++ cat /tmp/tmp.rH8gMbZ1AN
+++ rm /tmp/tmp.BOcfox6XF3 /tmp/tmp.rH8gMbZ1AN
+++ return 0
++ local hostname=null
++ '[' -n null -a null '!=' null ']'
+++ kubectl_bin get service/monitoring-service -o json
+++ jq '.status.loadBalancer.ingress[].ip'
+++ sed -e 's/^"//; s/"$//;'
++++ mktemp
+++ local LAST_OUT=/tmp/tmp.bHsdNwrWi6
++++ mktemp
+++ local LAST_ERR=/tmp/tmp.ep6H1iKNzG
+++ local exit_status=0
+++ local timeout=4
++++ seq 0 2
+++ for i in $(seq 0 2)
+++ set +e
+++ kubectl get service/monitoring-service -o json
+++ exit_status=0
+++ set -e
+++ '[' 0 '!=' 0 -a -n 1 ']'
+++ break
+++ cat /tmp/tmp.bHsdNwrWi6
+++ cat /tmp/tmp.ep6H1iKNzG
+++ rm /tmp/tmp.bHsdNwrWi6 /tmp/tmp.ep6H1iKNzG
+++ return 0
++ local ip=34.27.251.120
++ '[' -n 34.27.251.120 -a 34.27.251.120 '!=' null ']'
++ echo 34.27.251.120
++ return
+ local endpoint=34.27.251.120
+ curl -s -k 'https://admin:admin@34.27.251.120/graph/api/datasources/proxy/1/api/v1/query_range?query=min%28node_boot_time_seconds%7Bnode_name%3D%7E%22monitoring-2-0-17023-monitoring-cfg-1%22%7d%20or%20node_boot_time_seconds%7Bnode_name%3D%7E%22monitoring-2-0-17023-monitoring-cfg-1%22%7D%29&start=1774518571&end=1774518631&step=60'
+ jq '.data.result[0].values[][1]'
+ grep '^"[0-9]'
"1774511709"
"1774511709"
+ get_metric_values mongodb_connections monitoring-2-0-17023-monitoring-cfg-1 admin:admin
+ local metric=mongodb_connections
+ local instance=monitoring-2-0-17023-monitoring-cfg-1
+ local user_pass=admin:admin
++ /usr/sbin/date -u +%s -d '-1 minute'
+ local start=1774518573
++ /usr/sbin/date -u +%s
+ local end=1774518633
++ get_service_endpoint monitoring-service
++ local service=monitoring-service
+++ kubectl_bin get service/monitoring-service -o json
+++ jq '.status.loadBalancer.ingress[].hostname'
+++ sed -e 's/^"//; s/"$//;'
++++ mktemp
+++ local LAST_OUT=/tmp/tmp.pXhBRnuoqQ
++++ mktemp
+++ local LAST_ERR=/tmp/tmp.iguWGQguWZ
+++ local exit_status=0
+++ local timeout=4
++++ seq 0 2
+++ for i in $(seq 0 2)
+++ set +e
+++ kubectl get service/monitoring-service -o json
+++ exit_status=0
+++ set -e
+++ '[' 0 '!=' 0 -a -n 1 ']'
+++ break
+++ cat /tmp/tmp.pXhBRnuoqQ
+++ cat /tmp/tmp.iguWGQguWZ
+++ rm /tmp/tmp.pXhBRnuoqQ /tmp/tmp.iguWGQguWZ
+++ return 0
++ local hostname=null
++ '[' -n null -a null '!=' null ']'
+++ kubectl_bin get service/monitoring-service -o json
+++ jq '.status.loadBalancer.ingress[].ip'
+++ sed -e 's/^"//; s/"$//;'
++++ mktemp
+++ local LAST_OUT=/tmp/tmp.Tpgt2c8ArW
++++ mktemp
+++ local LAST_ERR=/tmp/tmp.Q0LDQGkd1R
+++ local exit_status=0
+++ local timeout=4
++++ seq 0 2
+++ for i in $(seq 0 2)
+++ set +e
+++ kubectl get service/monitoring-service -o json
+++ exit_status=0
+++ set -e
+++ '[' 0 '!=' 0 -a -n 1 ']'
+++ break
+++ cat /tmp/tmp.Tpgt2c8ArW
+++ cat /tmp/tmp.Q0LDQGkd1R
+++ rm /tmp/tmp.Tpgt2c8ArW /tmp/tmp.Q0LDQGkd1R
+++ return 0
++ local ip=34.27.251.120
++ '[' -n 34.27.251.120 -a 34.27.251.120 '!=' null ']'
++ echo 34.27.251.120
++ return
+ local endpoint=34.27.251.120
+ curl -s -k 'https://admin:admin@34.27.251.120/graph/api/datasources/proxy/1/api/v1/query_range?query=min%28mongodb_connections%7Bnode_name%3D%7E%22monitoring-2-0-17023-monitoring-cfg-1%22%7d%20or%20mongodb_connections%7Bnode_name%3D%7E%22monitoring-2-0-17023-monitoring-cfg-1%22%7D%29&start=1774518573&end=1774518633&step=60'
+ jq '.data.result[0].values[][1]'
+ grep '^"[0-9]'
"0"
"0"
+ desc 'check mongos metrics'
+ set +o xtrace
----------------------------------------------------------------------------------- check mongos metrics ----------------------------------------------------------------------------------- ++ kubectl get pod -l app.kubernetes.io/component=mongos -o 'jsonpath={.items[0].metadata.name}' + MONGOS_POD_NAME=monitoring-mongos-0 + get_metric_values node_boot_time_seconds monitoring-2-0-17023-monitoring-mongos-0 admin:admin + local metric=node_boot_time_seconds + local instance=monitoring-2-0-17023-monitoring-mongos-0 + local user_pass=admin:admin ++ /usr/sbin/date -u +%s -d '-1 minute' + local start=1774518577 ++ /usr/sbin/date -u +%s + local end=1774518637 ++ get_service_endpoint monitoring-service ++ local service=monitoring-service +++ kubectl_bin get service/monitoring-service -o json +++ jq '.status.loadBalancer.ingress[].hostname' +++ sed -e 's/^"//; s/"$//;' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.zevdmY4oq8 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.6ChJm6SeF5 +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.zevdmY4oq8 +++ cat /tmp/tmp.6ChJm6SeF5 +++ rm /tmp/tmp.zevdmY4oq8 /tmp/tmp.6ChJm6SeF5 +++ return 0 ++ local hostname=null ++ '[' -n null -a null '!=' null ']' +++ kubectl_bin get service/monitoring-service -o json +++ jq '.status.loadBalancer.ingress[].ip' +++ sed -e 's/^"//; s/"$//;' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.f7dwdnGy1u ++++ mktemp +++ local LAST_ERR=/tmp/tmp.UCOU95kKAs +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.f7dwdnGy1u +++ cat /tmp/tmp.UCOU95kKAs +++ rm /tmp/tmp.f7dwdnGy1u /tmp/tmp.UCOU95kKAs +++ return 0 ++ local ip=34.27.251.120 ++ '[' -n 34.27.251.120 -a 34.27.251.120 '!=' null ']' ++ echo 34.27.251.120 ++ return + local endpoint=34.27.251.120 + curl -s -k 'https://admin:admin@34.27.251.120/graph/api/datasources/proxy/1/api/v1/query_range?query=min%28node_boot_time_seconds%7Bnode_name%3D%7E%22monitoring-2-0-17023-monitoring-mongos-0%22%7d%20or%20node_boot_time_seconds%7Bnode_name%3D%7E%22monitoring-2-0-17023-monitoring-mongos-0%22%7D%29&start=1774518577&end=1774518637&step=60' + jq '.data.result[0].values[][1]' + grep '^"[0-9]' "1774517324" "1774517324" + sleep 90 + desc 'check QAN data' + set +o xtrace ----------------------------------------------------------------------------------- check QAN data ----------------------------------------------------------------------------------- + get_qan_values mongodb dev-mongod admin:admin + local service_type=mongodb + local environment=dev-mongod + local user_pass=admin:admin + local start + local end + local endpoint ++ /usr/sbin/date -u -d '-12 hour' +%Y-%m-%dT%H:%M:%S%:z + start=2026-03-25T21:52:09+00:00 ++ /usr/sbin/date -u +%Y-%m-%dT%H:%M:%S%:z + end=2026-03-26T09:52:09+00:00 ++ get_service_endpoint monitoring-service ++ local service=monitoring-service +++ kubectl_bin get service/monitoring-service -o json +++ jq '.status.loadBalancer.ingress[].hostname' +++ sed -e 's/^"//; s/"$//;' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.Rpm8k3yMIC ++++ mktemp +++ local LAST_ERR=/tmp/tmp.qFjmayVqEE +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ set -e +++ '[' 0 
'!=' 0 -a -n 1 ']'
+++ break
+++ cat /tmp/tmp.Rpm8k3yMIC
+++ cat /tmp/tmp.qFjmayVqEE
+++ rm /tmp/tmp.Rpm8k3yMIC /tmp/tmp.qFjmayVqEE
+++ return 0
++ local hostname=null
++ '[' -n null -a null '!=' null ']'
+++ kubectl_bin get service/monitoring-service -o json
+++ jq '.status.loadBalancer.ingress[].ip'
+++ sed -e 's/^"//; s/"$//;'
++++ mktemp
+++ local LAST_OUT=/tmp/tmp.sl5URQahSM
++++ mktemp
+++ local LAST_ERR=/tmp/tmp.iFxUbdgcOt
+++ local exit_status=0
+++ local timeout=4
++++ seq 0 2
+++ for i in $(seq 0 2)
+++ set +e
+++ kubectl get service/monitoring-service -o json
+++ exit_status=0
+++ set -e
+++ '[' 0 '!=' 0 -a -n 1 ']'
+++ break
+++ cat /tmp/tmp.sl5URQahSM
+++ cat /tmp/tmp.iFxUbdgcOt
+++ rm /tmp/tmp.sl5URQahSM /tmp/tmp.iFxUbdgcOt
+++ return 0
++ local ip=34.27.251.120
++ '[' -n 34.27.251.120 -a 34.27.251.120 '!=' null ']'
++ echo 34.27.251.120
++ return
+ endpoint=34.27.251.120
+ cat
+ local response
+ retry=0
++ curl -s -k -XPOST -d @payload.json https://admin:admin@34.27.251.120/v0/qan/GetReport
++ jq '.rows[].sparkline'
+ [[ [ { "time_frame": 360, "timestamp": "2026-03-26T09:52:00Z" }, { "point": 1, "time_frame": 360, "timestamp": "2026-03-26T09:46:00Z" }, ...
[QAN GetReport sparkline output: four near-identical arrays of 120 six-minute buckets each ("time_frame": 360), timestamps stepping back from 2026-03-26T09:52:00Z to 2026-03-25T21:58:00Z; log truncated part-way through the fourth array]
"2026-03-26T08:28:00Z" }, { "point": 15, "time_frame": 360, "timestamp": "2026-03-26T08:22:00Z" }, { "point": 16, "time_frame": 360, "timestamp": "2026-03-26T08:16:00Z" }, { "point": 17, "time_frame": 360, "timestamp": "2026-03-26T08:10:00Z" }, { "point": 18, "time_frame": 360, "timestamp": "2026-03-26T08:04:00Z" }, { "point": 19, "time_frame": 360, "timestamp": "2026-03-26T07:58:00Z" }, { "point": 20, "time_frame": 360, "timestamp": "2026-03-26T07:52:00Z" }, { "point": 21, "time_frame": 360, "timestamp": "2026-03-26T07:46:00Z" }, { "point": 22, "time_frame": 360, "timestamp": "2026-03-26T07:40:00Z" }, { "point": 23, "time_frame": 360, "timestamp": "2026-03-26T07:34:00Z" }, { "point": 24, "time_frame": 360, "timestamp": "2026-03-26T07:28:00Z" }, { "point": 25, "time_frame": 360, "timestamp": "2026-03-26T07:22:00Z" }, { "point": 26, "time_frame": 360, "timestamp": "2026-03-26T07:16:00Z" }, { "point": 27, "time_frame": 360, "timestamp": "2026-03-26T07:10:00Z" }, { "point": 28, "time_frame": 360, "timestamp": "2026-03-26T07:04:00Z" }, { "point": 29, "time_frame": 360, "timestamp": "2026-03-26T06:58:00Z" }, { "point": 30, "time_frame": 360, "timestamp": "2026-03-26T06:52:00Z" }, { "point": 31, "time_frame": 360, "timestamp": "2026-03-26T06:46:00Z" }, { "point": 32, "time_frame": 360, "timestamp": "2026-03-26T06:40:00Z" }, { "point": 33, "time_frame": 360, "timestamp": "2026-03-26T06:34:00Z" }, { "point": 34, "time_frame": 360, "timestamp": "2026-03-26T06:28:00Z" }, { "point": 35, "time_frame": 360, "timestamp": "2026-03-26T06:22:00Z" }, { "point": 36, "time_frame": 360, "timestamp": "2026-03-26T06:16:00Z" }, { "point": 37, "time_frame": 360, "timestamp": "2026-03-26T06:10:00Z" }, { "point": 38, "time_frame": 360, "timestamp": "2026-03-26T06:04:00Z" }, { "point": 39, "time_frame": 360, "timestamp": "2026-03-26T05:58:00Z" }, { "point": 40, "time_frame": 360, "timestamp": "2026-03-26T05:52:00Z" }, { "point": 41, "time_frame": 360, "timestamp": "2026-03-26T05:46:00Z" }, { "point": 42, "time_frame": 360, "timestamp": "2026-03-26T05:40:00Z" }, { "point": 43, "time_frame": 360, "timestamp": "2026-03-26T05:34:00Z" }, { "point": 44, "time_frame": 360, "timestamp": "2026-03-26T05:28:00Z" }, { "point": 45, "time_frame": 360, "timestamp": "2026-03-26T05:22:00Z" }, { "point": 46, "time_frame": 360, "timestamp": "2026-03-26T05:16:00Z" }, { "point": 47, "time_frame": 360, "timestamp": "2026-03-26T05:10:00Z" }, { "point": 48, "time_frame": 360, "timestamp": "2026-03-26T05:04:00Z" }, { "point": 49, "time_frame": 360, "timestamp": "2026-03-26T04:58:00Z" }, { "point": 50, "time_frame": 360, "timestamp": "2026-03-26T04:52:00Z" }, { "point": 51, "time_frame": 360, "timestamp": "2026-03-26T04:46:00Z" }, { "point": 52, "time_frame": 360, "timestamp": "2026-03-26T04:40:00Z" }, { "point": 53, "time_frame": 360, "timestamp": "2026-03-26T04:34:00Z" }, { "point": 54, "time_frame": 360, "timestamp": "2026-03-26T04:28:00Z" }, { "point": 55, "time_frame": 360, "timestamp": "2026-03-26T04:22:00Z" }, { "point": 56, "time_frame": 360, "timestamp": "2026-03-26T04:16:00Z" }, { "point": 57, "time_frame": 360, "timestamp": "2026-03-26T04:10:00Z" }, { "point": 58, "time_frame": 360, "timestamp": "2026-03-26T04:04:00Z" }, { "point": 59, "time_frame": 360, "timestamp": "2026-03-26T03:58:00Z" }, { "point": 60, "time_frame": 360, "timestamp": "2026-03-26T03:52:00Z" }, { "point": 61, "time_frame": 360, "timestamp": "2026-03-26T03:46:00Z" }, { "point": 62, "time_frame": 360, "timestamp": "2026-03-26T03:40:00Z" }, { "point": 63, 
"time_frame": 360, "timestamp": "2026-03-26T03:34:00Z" }, { "point": 64, "time_frame": 360, "timestamp": "2026-03-26T03:28:00Z" }, { "point": 65, "time_frame": 360, "timestamp": "2026-03-26T03:22:00Z" }, { "point": 66, "time_frame": 360, "timestamp": "2026-03-26T03:16:00Z" }, { "point": 67, "time_frame": 360, "timestamp": "2026-03-26T03:10:00Z" }, { "point": 68, "time_frame": 360, "timestamp": "2026-03-26T03:04:00Z" }, { "point": 69, "time_frame": 360, "timestamp": "2026-03-26T02:58:00Z" }, { "point": 70, "time_frame": 360, "timestamp": "2026-03-26T02:52:00Z" }, { "point": 71, "time_frame": 360, "timestamp": "2026-03-26T02:46:00Z" }, { "point": 72, "time_frame": 360, "timestamp": "2026-03-26T02:40:00Z" }, { "point": 73, "time_frame": 360, "timestamp": "2026-03-26T02:34:00Z" }, { "point": 74, "time_frame": 360, "timestamp": "2026-03-26T02:28:00Z" }, { "point": 75, "time_frame": 360, "timestamp": "2026-03-26T02:22:00Z" }, { "point": 76, "time_frame": 360, "timestamp": "2026-03-26T02:16:00Z" }, { "point": 77, "time_frame": 360, "timestamp": "2026-03-26T02:10:00Z" }, { "point": 78, "time_frame": 360, "timestamp": "2026-03-26T02:04:00Z" }, { "point": 79, "time_frame": 360, "timestamp": "2026-03-26T01:58:00Z" }, { "point": 80, "time_frame": 360, "timestamp": "2026-03-26T01:52:00Z" }, { "point": 81, "time_frame": 360, "timestamp": "2026-03-26T01:46:00Z" }, { "point": 82, "time_frame": 360, "timestamp": "2026-03-26T01:40:00Z" }, { "point": 83, "time_frame": 360, "timestamp": "2026-03-26T01:34:00Z" }, { "point": 84, "time_frame": 360, "timestamp": "2026-03-26T01:28:00Z" }, { "point": 85, "time_frame": 360, "timestamp": "2026-03-26T01:22:00Z" }, { "point": 86, "time_frame": 360, "timestamp": "2026-03-26T01:16:00Z" }, { "point": 87, "time_frame": 360, "timestamp": "2026-03-26T01:10:00Z" }, { "point": 88, "time_frame": 360, "timestamp": "2026-03-26T01:04:00Z" }, { "point": 89, "time_frame": 360, "timestamp": "2026-03-26T00:58:00Z" }, { "point": 90, "time_frame": 360, "timestamp": "2026-03-26T00:52:00Z" }, { "point": 91, "time_frame": 360, "timestamp": "2026-03-26T00:46:00Z" }, { "point": 92, "time_frame": 360, "timestamp": "2026-03-26T00:40:00Z" }, { "point": 93, "time_frame": 360, "timestamp": "2026-03-26T00:34:00Z" }, { "point": 94, "time_frame": 360, "timestamp": "2026-03-26T00:28:00Z" }, { "point": 95, "time_frame": 360, "timestamp": "2026-03-26T00:22:00Z" }, { "point": 96, "time_frame": 360, "timestamp": "2026-03-26T00:16:00Z" }, { "point": 97, "time_frame": 360, "timestamp": "2026-03-26T00:10:00Z" }, { "point": 98, "time_frame": 360, "timestamp": "2026-03-26T00:04:00Z" }, { "point": 99, "time_frame": 360, "timestamp": "2026-03-25T23:58:00Z" }, { "point": 100, "time_frame": 360, "timestamp": "2026-03-25T23:52:00Z" }, { "point": 101, "time_frame": 360, "timestamp": "2026-03-25T23:46:00Z" }, { "point": 102, "time_frame": 360, "timestamp": "2026-03-25T23:40:00Z" }, { "point": 103, "time_frame": 360, "timestamp": "2026-03-25T23:34:00Z" }, { "point": 104, "time_frame": 360, "timestamp": "2026-03-25T23:28:00Z" }, { "point": 105, "time_frame": 360, "timestamp": "2026-03-25T23:22:00Z" }, { "point": 106, "time_frame": 360, "timestamp": "2026-03-25T23:16:00Z" }, { "point": 107, "time_frame": 360, "timestamp": "2026-03-25T23:10:00Z" }, { "point": 108, "time_frame": 360, "timestamp": "2026-03-25T23:04:00Z" }, { "point": 109, "time_frame": 360, "timestamp": "2026-03-25T22:58:00Z" }, { "point": 110, "time_frame": 360, "timestamp": "2026-03-25T22:52:00Z" }, { "point": 111, "time_frame": 360, "timestamp": 
"2026-03-25T22:46:00Z" }, { "point": 112, "time_frame": 360, "timestamp": "2026-03-25T22:40:00Z" }, { "point": 113, "time_frame": 360, "timestamp": "2026-03-25T22:34:00Z" }, { "point": 114, "time_frame": 360, "timestamp": "2026-03-25T22:28:00Z" }, { "point": 115, "time_frame": 360, "timestamp": "2026-03-25T22:22:00Z" }, { "point": 116, "time_frame": 360, "timestamp": "2026-03-25T22:16:00Z" }, { "point": 117, "time_frame": 360, "timestamp": "2026-03-25T22:10:00Z" }, { "point": 118, "time_frame": 360, "timestamp": "2026-03-25T22:04:00Z" }, { "point": 119, "time_frame": 360, "timestamp": "2026-03-25T21:58:00Z" } ] [ { "time_frame": 360, "timestamp": "2026-03-26T09:52:00Z" }, { "point": 1, "time_frame": 360, "timestamp": "2026-03-26T09:46:00Z" }, { "point": 2, "time_frame": 360, "timestamp": "2026-03-26T09:40:00Z" }, { "point": 3, "time_frame": 360, "timestamp": "2026-03-26T09:34:00Z" }, { "point": 4, "time_frame": 360, "timestamp": "2026-03-26T09:28:00Z" }, { "point": 5, "time_frame": 360, "timestamp": "2026-03-26T09:22:00Z" }, { "point": 6, "time_frame": 360, "timestamp": "2026-03-26T09:16:00Z" }, { "point": 7, "time_frame": 360, "timestamp": "2026-03-26T09:10:00Z" }, { "point": 8, "time_frame": 360, "timestamp": "2026-03-26T09:04:00Z" }, { "point": 9, "time_frame": 360, "timestamp": "2026-03-26T08:58:00Z" }, { "point": 10, "time_frame": 360, "timestamp": "2026-03-26T08:52:00Z" }, { "point": 11, "time_frame": 360, "timestamp": "2026-03-26T08:46:00Z" }, { "point": 12, "time_frame": 360, "timestamp": "2026-03-26T08:40:00Z" }, { "point": 13, "time_frame": 360, "timestamp": "2026-03-26T08:34:00Z" }, { "point": 14, "time_frame": 360, "timestamp": "2026-03-26T08:28:00Z" }, { "point": 15, "time_frame": 360, "timestamp": "2026-03-26T08:22:00Z" }, { "point": 16, "time_frame": 360, "timestamp": "2026-03-26T08:16:00Z" }, { "point": 17, "time_frame": 360, "timestamp": "2026-03-26T08:10:00Z" }, { "point": 18, "time_frame": 360, "timestamp": "2026-03-26T08:04:00Z" }, { "point": 19, "time_frame": 360, "timestamp": "2026-03-26T07:58:00Z" }, { "point": 20, "time_frame": 360, "timestamp": "2026-03-26T07:52:00Z" }, { "point": 21, "time_frame": 360, "timestamp": "2026-03-26T07:46:00Z" }, { "point": 22, "time_frame": 360, "timestamp": "2026-03-26T07:40:00Z" }, { "point": 23, "time_frame": 360, "timestamp": "2026-03-26T07:34:00Z" }, { "point": 24, "time_frame": 360, "timestamp": "2026-03-26T07:28:00Z" }, { "point": 25, "time_frame": 360, "timestamp": "2026-03-26T07:22:00Z" }, { "point": 26, "time_frame": 360, "timestamp": "2026-03-26T07:16:00Z" }, { "point": 27, "time_frame": 360, "timestamp": "2026-03-26T07:10:00Z" }, { "point": 28, "time_frame": 360, "timestamp": "2026-03-26T07:04:00Z" }, { "point": 29, "time_frame": 360, "timestamp": "2026-03-26T06:58:00Z" }, { "point": 30, "time_frame": 360, "timestamp": "2026-03-26T06:52:00Z" }, { "point": 31, "time_frame": 360, "timestamp": "2026-03-26T06:46:00Z" }, { "point": 32, "time_frame": 360, "timestamp": "2026-03-26T06:40:00Z" }, { "point": 33, "time_frame": 360, "timestamp": "2026-03-26T06:34:00Z" }, { "point": 34, "time_frame": 360, "timestamp": "2026-03-26T06:28:00Z" }, { "point": 35, "time_frame": 360, "timestamp": "2026-03-26T06:22:00Z" }, { "point": 36, "time_frame": 360, "timestamp": "2026-03-26T06:16:00Z" }, { "point": 37, "time_frame": 360, "timestamp": "2026-03-26T06:10:00Z" }, { "point": 38, "time_frame": 360, "timestamp": "2026-03-26T06:04:00Z" }, { "point": 39, "time_frame": 360, "timestamp": "2026-03-26T05:58:00Z" }, { "point": 40, "time_frame": 360, 
"timestamp": "2026-03-26T05:52:00Z" }, { "point": 41, "time_frame": 360, "timestamp": "2026-03-26T05:46:00Z" }, { "point": 42, "time_frame": 360, "timestamp": "2026-03-26T05:40:00Z" }, { "point": 43, "time_frame": 360, "timestamp": "2026-03-26T05:34:00Z" }, { "point": 44, "time_frame": 360, "timestamp": "2026-03-26T05:28:00Z" }, { "point": 45, "time_frame": 360, "timestamp": "2026-03-26T05:22:00Z" }, { "point": 46, "time_frame": 360, "timestamp": "2026-03-26T05:16:00Z" }, { "point": 47, "time_frame": 360, "timestamp": "2026-03-26T05:10:00Z" }, { "point": 48, "time_frame": 360, "timestamp": "2026-03-26T05:04:00Z" }, { "point": 49, "time_frame": 360, "timestamp": "2026-03-26T04:58:00Z" }, { "point": 50, "time_frame": 360, "timestamp": "2026-03-26T04:52:00Z" }, { "point": 51, "time_frame": 360, "timestamp": "2026-03-26T04:46:00Z" }, { "point": 52, "time_frame": 360, "timestamp": "2026-03-26T04:40:00Z" }, { "point": 53, "time_frame": 360, "timestamp": "2026-03-26T04:34:00Z" }, { "point": 54, "time_frame": 360, "timestamp": "2026-03-26T04:28:00Z" }, { "point": 55, "time_frame": 360, "timestamp": "2026-03-26T04:22:00Z" }, { "point": 56, "time_frame": 360, "timestamp": "2026-03-26T04:16:00Z" }, { "point": 57, "time_frame": 360, "timestamp": "2026-03-26T04:10:00Z" }, { "point": 58, "time_frame": 360, "timestamp": "2026-03-26T04:04:00Z" }, { "point": 59, "time_frame": 360, "timestamp": "2026-03-26T03:58:00Z" }, { "point": 60, "time_frame": 360, "timestamp": "2026-03-26T03:52:00Z" }, { "point": 61, "time_frame": 360, "timestamp": "2026-03-26T03:46:00Z" }, { "point": 62, "time_frame": 360, "timestamp": "2026-03-26T03:40:00Z" }, { "point": 63, "time_frame": 360, "timestamp": "2026-03-26T03:34:00Z" }, { "point": 64, "time_frame": 360, "timestamp": "2026-03-26T03:28:00Z" }, { "point": 65, "time_frame": 360, "timestamp": "2026-03-26T03:22:00Z" }, { "point": 66, "time_frame": 360, "timestamp": "2026-03-26T03:16:00Z" }, { "point": 67, "time_frame": 360, "timestamp": "2026-03-26T03:10:00Z" }, { "point": 68, "time_frame": 360, "timestamp": "2026-03-26T03:04:00Z" }, { "point": 69, "time_frame": 360, "timestamp": "2026-03-26T02:58:00Z" }, { "point": 70, "time_frame": 360, "timestamp": "2026-03-26T02:52:00Z" }, { "point": 71, "time_frame": 360, "timestamp": "2026-03-26T02:46:00Z" }, { "point": 72, "time_frame": 360, "timestamp": "2026-03-26T02:40:00Z" }, { "point": 73, "time_frame": 360, "timestamp": "2026-03-26T02:34:00Z" }, { "point": 74, "time_frame": 360, "timestamp": "2026-03-26T02:28:00Z" }, { "point": 75, "time_frame": 360, "timestamp": "2026-03-26T02:22:00Z" }, { "point": 76, "time_frame": 360, "timestamp": "2026-03-26T02:16:00Z" }, { "point": 77, "time_frame": 360, "timestamp": "2026-03-26T02:10:00Z" }, { "point": 78, "time_frame": 360, "timestamp": "2026-03-26T02:04:00Z" }, { "point": 79, "time_frame": 360, "timestamp": "2026-03-26T01:58:00Z" }, { "point": 80, "time_frame": 360, "timestamp": "2026-03-26T01:52:00Z" }, { "point": 81, "time_frame": 360, "timestamp": "2026-03-26T01:46:00Z" }, { "point": 82, "time_frame": 360, "timestamp": "2026-03-26T01:40:00Z" }, { "point": 83, "time_frame": 360, "timestamp": "2026-03-26T01:34:00Z" }, { "point": 84, "time_frame": 360, "timestamp": "2026-03-26T01:28:00Z" }, { "point": 85, "time_frame": 360, "timestamp": "2026-03-26T01:22:00Z" }, { "point": 86, "time_frame": 360, "timestamp": "2026-03-26T01:16:00Z" }, { "point": 87, "time_frame": 360, "timestamp": "2026-03-26T01:10:00Z" }, { "point": 88, "time_frame": 360, "timestamp": "2026-03-26T01:04:00Z" }, { "point": 
89, "time_frame": 360, "timestamp": "2026-03-26T00:58:00Z" }, { "point": 90, "time_frame": 360, "timestamp": "2026-03-26T00:52:00Z" }, { "point": 91, "time_frame": 360, "timestamp": "2026-03-26T00:46:00Z" }, { "point": 92, "time_frame": 360, "timestamp": "2026-03-26T00:40:00Z" }, { "point": 93, "time_frame": 360, "timestamp": "2026-03-26T00:34:00Z" }, { "point": 94, "time_frame": 360, "timestamp": "2026-03-26T00:28:00Z" }, { "point": 95, "time_frame": 360, "timestamp": "2026-03-26T00:22:00Z" }, { "point": 96, "time_frame": 360, "timestamp": "2026-03-26T00:16:00Z" }, { "point": 97, "time_frame": 360, "timestamp": "2026-03-26T00:10:00Z" }, { "point": 98, "time_frame": 360, "timestamp": "2026-03-26T00:04:00Z" }, { "point": 99, "time_frame": 360, "timestamp": "2026-03-25T23:58:00Z" }, { "point": 100, "time_frame": 360, "timestamp": "2026-03-25T23:52:00Z" }, { "point": 101, "time_frame": 360, "timestamp": "2026-03-25T23:46:00Z" }, { "point": 102, "time_frame": 360, "timestamp": "2026-03-25T23:40:00Z" }, { "point": 103, "time_frame": 360, "timestamp": "2026-03-25T23:34:00Z" }, { "point": 104, "time_frame": 360, "timestamp": "2026-03-25T23:28:00Z" }, { "point": 105, "time_frame": 360, "timestamp": "2026-03-25T23:22:00Z" }, { "point": 106, "time_frame": 360, "timestamp": "2026-03-25T23:16:00Z" }, { "point": 107, "time_frame": 360, "timestamp": "2026-03-25T23:10:00Z" }, { "point": 108, "time_frame": 360, "timestamp": "2026-03-25T23:04:00Z" }, { "point": 109, "time_frame": 360, "timestamp": "2026-03-25T22:58:00Z" }, { "point": 110, "time_frame": 360, "timestamp": "2026-03-25T22:52:00Z" }, { "point": 111, "time_frame": 360, "timestamp": "2026-03-25T22:46:00Z" }, { "point": 112, "time_frame": 360, "timestamp": "2026-03-25T22:40:00Z" }, { "point": 113, "time_frame": 360, "timestamp": "2026-03-25T22:34:00Z" }, { "point": 114, "time_frame": 360, "timestamp": "2026-03-25T22:28:00Z" }, { "point": 115, "time_frame": 360, "timestamp": "2026-03-25T22:22:00Z" }, { "point": 116, "time_frame": 360, "timestamp": "2026-03-25T22:16:00Z" }, { "point": 117, "time_frame": 360, "timestamp": "2026-03-25T22:10:00Z" }, { "point": 118, "time_frame": 360, "timestamp": "2026-03-25T22:04:00Z" }, { "point": 119, "time_frame": 360, "timestamp": "2026-03-25T21:58:00Z" } ] != \n\u\l\l ]] + rm -f payload.json + get_qan_values mongodb dev-mongos admin:admin + local service_type=mongodb + local environment=dev-mongos + local user_pass=admin:admin + local start + local end + local endpoint ++ /usr/sbin/date -u -d '-12 hour' +%Y-%m-%dT%H:%M:%S%:z + start=2026-03-25T21:52:11+00:00 ++ /usr/sbin/date -u +%Y-%m-%dT%H:%M:%S%:z + end=2026-03-26T09:52:11+00:00 ++ get_service_endpoint monitoring-service ++ local service=monitoring-service +++ kubectl_bin get service/monitoring-service -o json +++ sed -e 's/^"//; s/"$//;' +++ jq '.status.loadBalancer.ingress[].hostname' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.zJJVHpjAMf ++++ mktemp +++ local LAST_ERR=/tmp/tmp.4qRgRAiu45 +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.zJJVHpjAMf +++ cat /tmp/tmp.4qRgRAiu45 +++ rm /tmp/tmp.zJJVHpjAMf /tmp/tmp.4qRgRAiu45 +++ return 0 ++ local hostname=null ++ '[' -n null -a null '!=' null ']' +++ kubectl_bin get service/monitoring-service -o json +++ jq '.status.loadBalancer.ingress[].ip' +++ sed -e 's/^"//; s/"$//;' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.WtZabM0OEN ++++ mktemp +++ 
local LAST_ERR=/tmp/tmp.WfCBDVd1Da +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.WtZabM0OEN +++ cat /tmp/tmp.WfCBDVd1Da +++ rm /tmp/tmp.WtZabM0OEN /tmp/tmp.WfCBDVd1Da +++ return 0 ++ local ip=34.27.251.120 ++ '[' -n 34.27.251.120 -a 34.27.251.120 '!=' null ']' ++ echo 34.27.251.120 ++ return + endpoint=34.27.251.120 + cat + local response + retry=0 ++ curl -s -k -XPOST -d @payload.json https://admin:admin@34.27.251.120/v0/qan/GetReport ++ jq '.rows[].sparkline' + [[ [ { "time_frame": 360, "timestamp": "2026-03-26T09:52:00Z" }, { "point": 1, "time_frame": 360, "timestamp": "2026-03-26T09:46:00Z" }, { "point": 2, "time_frame": 360, "timestamp": "2026-03-26T09:40:00Z" }, { "point": 3, "time_frame": 360, "timestamp": "2026-03-26T09:34:00Z" }, { "point": 4, "time_frame": 360, "timestamp": "2026-03-26T09:28:00Z" }, { "point": 5, "time_frame": 360, "timestamp": "2026-03-26T09:22:00Z" }, { "point": 6, "time_frame": 360, "timestamp": "2026-03-26T09:16:00Z" }, { "point": 7, "time_frame": 360, "timestamp": "2026-03-26T09:10:00Z" }, { "point": 8, "time_frame": 360, "timestamp": "2026-03-26T09:04:00Z" }, { "point": 9, "time_frame": 360, "timestamp": "2026-03-26T08:58:00Z" }, { "point": 10, "time_frame": 360, "timestamp": "2026-03-26T08:52:00Z" }, { "point": 11, "time_frame": 360, "timestamp": "2026-03-26T08:46:00Z" }, { "point": 12, "time_frame": 360, "timestamp": "2026-03-26T08:40:00Z" }, { "point": 13, "time_frame": 360, "timestamp": "2026-03-26T08:34:00Z" }, { "point": 14, "time_frame": 360, "timestamp": "2026-03-26T08:28:00Z" }, { "point": 15, "time_frame": 360, "timestamp": "2026-03-26T08:22:00Z" }, { "point": 16, "time_frame": 360, "timestamp": "2026-03-26T08:16:00Z" }, { "point": 17, "time_frame": 360, "timestamp": "2026-03-26T08:10:00Z" }, { "point": 18, "time_frame": 360, "timestamp": "2026-03-26T08:04:00Z" }, { "point": 19, "time_frame": 360, "timestamp": "2026-03-26T07:58:00Z" }, { "point": 20, "time_frame": 360, "timestamp": "2026-03-26T07:52:00Z" }, { "point": 21, "time_frame": 360, "timestamp": "2026-03-26T07:46:00Z" }, { "point": 22, "time_frame": 360, "timestamp": "2026-03-26T07:40:00Z" }, { "point": 23, "time_frame": 360, "timestamp": "2026-03-26T07:34:00Z" }, { "point": 24, "time_frame": 360, "timestamp": "2026-03-26T07:28:00Z" }, { "point": 25, "time_frame": 360, "timestamp": "2026-03-26T07:22:00Z" }, { "point": 26, "time_frame": 360, "timestamp": "2026-03-26T07:16:00Z" }, { "point": 27, "time_frame": 360, "timestamp": "2026-03-26T07:10:00Z" }, { "point": 28, "time_frame": 360, "timestamp": "2026-03-26T07:04:00Z" }, { "point": 29, "time_frame": 360, "timestamp": "2026-03-26T06:58:00Z" }, { "point": 30, "time_frame": 360, "timestamp": "2026-03-26T06:52:00Z" }, { "point": 31, "time_frame": 360, "timestamp": "2026-03-26T06:46:00Z" }, { "point": 32, "time_frame": 360, "timestamp": "2026-03-26T06:40:00Z" }, { "point": 33, "time_frame": 360, "timestamp": "2026-03-26T06:34:00Z" }, { "point": 34, "time_frame": 360, "timestamp": "2026-03-26T06:28:00Z" }, { "point": 35, "time_frame": 360, "timestamp": "2026-03-26T06:22:00Z" }, { "point": 36, "time_frame": 360, "timestamp": "2026-03-26T06:16:00Z" }, { "point": 37, "time_frame": 360, "timestamp": "2026-03-26T06:10:00Z" }, { "point": 38, "time_frame": 360, "timestamp": "2026-03-26T06:04:00Z" }, { "point": 39, "time_frame": 360, "timestamp": "2026-03-26T05:58:00Z" }, { 
"point": 40, "time_frame": 360, "timestamp": "2026-03-26T05:52:00Z" }, { "point": 41, "time_frame": 360, "timestamp": "2026-03-26T05:46:00Z" }, { "point": 42, "time_frame": 360, "timestamp": "2026-03-26T05:40:00Z" }, { "point": 43, "time_frame": 360, "timestamp": "2026-03-26T05:34:00Z" }, { "point": 44, "time_frame": 360, "timestamp": "2026-03-26T05:28:00Z" }, { "point": 45, "time_frame": 360, "timestamp": "2026-03-26T05:22:00Z" }, { "point": 46, "time_frame": 360, "timestamp": "2026-03-26T05:16:00Z" }, { "point": 47, "time_frame": 360, "timestamp": "2026-03-26T05:10:00Z" }, { "point": 48, "time_frame": 360, "timestamp": "2026-03-26T05:04:00Z" }, { "point": 49, "time_frame": 360, "timestamp": "2026-03-26T04:58:00Z" }, { "point": 50, "time_frame": 360, "timestamp": "2026-03-26T04:52:00Z" }, { "point": 51, "time_frame": 360, "timestamp": "2026-03-26T04:46:00Z" }, { "point": 52, "time_frame": 360, "timestamp": "2026-03-26T04:40:00Z" }, { "point": 53, "time_frame": 360, "timestamp": "2026-03-26T04:34:00Z" }, { "point": 54, "time_frame": 360, "timestamp": "2026-03-26T04:28:00Z" }, { "point": 55, "time_frame": 360, "timestamp": "2026-03-26T04:22:00Z" }, { "point": 56, "time_frame": 360, "timestamp": "2026-03-26T04:16:00Z" }, { "point": 57, "time_frame": 360, "timestamp": "2026-03-26T04:10:00Z" }, { "point": 58, "time_frame": 360, "timestamp": "2026-03-26T04:04:00Z" }, { "point": 59, "time_frame": 360, "timestamp": "2026-03-26T03:58:00Z" }, { "point": 60, "time_frame": 360, "timestamp": "2026-03-26T03:52:00Z" }, { "point": 61, "time_frame": 360, "timestamp": "2026-03-26T03:46:00Z" }, { "point": 62, "time_frame": 360, "timestamp": "2026-03-26T03:40:00Z" }, { "point": 63, "time_frame": 360, "timestamp": "2026-03-26T03:34:00Z" }, { "point": 64, "time_frame": 360, "timestamp": "2026-03-26T03:28:00Z" }, { "point": 65, "time_frame": 360, "timestamp": "2026-03-26T03:22:00Z" }, { "point": 66, "time_frame": 360, "timestamp": "2026-03-26T03:16:00Z" }, { "point": 67, "time_frame": 360, "timestamp": "2026-03-26T03:10:00Z" }, { "point": 68, "time_frame": 360, "timestamp": "2026-03-26T03:04:00Z" }, { "point": 69, "time_frame": 360, "timestamp": "2026-03-26T02:58:00Z" }, { "point": 70, "time_frame": 360, "timestamp": "2026-03-26T02:52:00Z" }, { "point": 71, "time_frame": 360, "timestamp": "2026-03-26T02:46:00Z" }, { "point": 72, "time_frame": 360, "timestamp": "2026-03-26T02:40:00Z" }, { "point": 73, "time_frame": 360, "timestamp": "2026-03-26T02:34:00Z" }, { "point": 74, "time_frame": 360, "timestamp": "2026-03-26T02:28:00Z" }, { "point": 75, "time_frame": 360, "timestamp": "2026-03-26T02:22:00Z" }, { "point": 76, "time_frame": 360, "timestamp": "2026-03-26T02:16:00Z" }, { "point": 77, "time_frame": 360, "timestamp": "2026-03-26T02:10:00Z" }, { "point": 78, "time_frame": 360, "timestamp": "2026-03-26T02:04:00Z" }, { "point": 79, "time_frame": 360, "timestamp": "2026-03-26T01:58:00Z" }, { "point": 80, "time_frame": 360, "timestamp": "2026-03-26T01:52:00Z" }, { "point": 81, "time_frame": 360, "timestamp": "2026-03-26T01:46:00Z" }, { "point": 82, "time_frame": 360, "timestamp": "2026-03-26T01:40:00Z" }, { "point": 83, "time_frame": 360, "timestamp": "2026-03-26T01:34:00Z" }, { "point": 84, "time_frame": 360, "timestamp": "2026-03-26T01:28:00Z" }, { "point": 85, "time_frame": 360, "timestamp": "2026-03-26T01:22:00Z" }, { "point": 86, "time_frame": 360, "timestamp": "2026-03-26T01:16:00Z" }, { "point": 87, "time_frame": 360, "timestamp": "2026-03-26T01:10:00Z" }, { "point": 88, "time_frame": 360, "timestamp": 
"2026-03-26T01:04:00Z" }, { "point": 89, "time_frame": 360, "timestamp": "2026-03-26T00:58:00Z" }, { "point": 90, "time_frame": 360, "timestamp": "2026-03-26T00:52:00Z" }, { "point": 91, "time_frame": 360, "timestamp": "2026-03-26T00:46:00Z" }, { "point": 92, "time_frame": 360, "timestamp": "2026-03-26T00:40:00Z" }, { "point": 93, "time_frame": 360, "timestamp": "2026-03-26T00:34:00Z" }, { "point": 94, "time_frame": 360, "timestamp": "2026-03-26T00:28:00Z" }, { "point": 95, "time_frame": 360, "timestamp": "2026-03-26T00:22:00Z" }, { "point": 96, "time_frame": 360, "timestamp": "2026-03-26T00:16:00Z" }, { "point": 97, "time_frame": 360, "timestamp": "2026-03-26T00:10:00Z" }, { "point": 98, "time_frame": 360, "timestamp": "2026-03-26T00:04:00Z" }, { "point": 99, "time_frame": 360, "timestamp": "2026-03-25T23:58:00Z" }, { "point": 100, "time_frame": 360, "timestamp": "2026-03-25T23:52:00Z" }, { "point": 101, "time_frame": 360, "timestamp": "2026-03-25T23:46:00Z" }, { "point": 102, "time_frame": 360, "timestamp": "2026-03-25T23:40:00Z" }, { "point": 103, "time_frame": 360, "timestamp": "2026-03-25T23:34:00Z" }, { "point": 104, "time_frame": 360, "timestamp": "2026-03-25T23:28:00Z" }, { "point": 105, "time_frame": 360, "timestamp": "2026-03-25T23:22:00Z" }, { "point": 106, "time_frame": 360, "timestamp": "2026-03-25T23:16:00Z" }, { "point": 107, "time_frame": 360, "timestamp": "2026-03-25T23:10:00Z" }, { "point": 108, "time_frame": 360, "timestamp": "2026-03-25T23:04:00Z" }, { "point": 109, "time_frame": 360, "timestamp": "2026-03-25T22:58:00Z" }, { "point": 110, "time_frame": 360, "timestamp": "2026-03-25T22:52:00Z" }, { "point": 111, "time_frame": 360, "timestamp": "2026-03-25T22:46:00Z" }, { "point": 112, "time_frame": 360, "timestamp": "2026-03-25T22:40:00Z" }, { "point": 113, "time_frame": 360, "timestamp": "2026-03-25T22:34:00Z" }, { "point": 114, "time_frame": 360, "timestamp": "2026-03-25T22:28:00Z" }, { "point": 115, "time_frame": 360, "timestamp": "2026-03-25T22:22:00Z" }, { "point": 116, "time_frame": 360, "timestamp": "2026-03-25T22:16:00Z" }, { "point": 117, "time_frame": 360, "timestamp": "2026-03-25T22:10:00Z" }, { "point": 118, "time_frame": 360, "timestamp": "2026-03-25T22:04:00Z" }, { "point": 119, "time_frame": 360, "timestamp": "2026-03-25T21:58:00Z" } ] != \n\u\l\l ]] + rm -f payload.json + nodeList=($(get_node_id_from_pmm)) ++ get_node_id_from_pmm ++ nodeList=() ++ local -a nodeList +++ kubectl_bin get pods --no-headers -l app.kubernetes.io/name=percona-server-mongodb --output=custom-columns=NAME:.metadata.name ++++ mktemp +++ local LAST_OUT=/tmp/tmp.4dwIIEz350 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.HylNelMZea +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get pods --no-headers -l app.kubernetes.io/name=percona-server-mongodb --output=custom-columns=NAME:.metadata.name +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.4dwIIEz350 +++ cat /tmp/tmp.HylNelMZea +++ rm /tmp/tmp.4dwIIEz350 /tmp/tmp.HylNelMZea +++ return 0 ++ for instance in $(kubectl_bin get pods --no-headers -l app.kubernetes.io/name=percona-server-mongodb --output=custom-columns='NAME:.metadata.name') ++ nodeList+=($(kubectl_bin exec -n "$namespace" $instance -c pmm-client -- pmm-admin status --json | jq -r '.pmm_agent_status.node_id')) +++ kubectl_bin exec -n monitoring-2-0-17023 monitoring-cfg-0 -c pmm-client -- pmm-admin status --json +++ jq -r .pmm_agent_status.node_id ++++ mktemp +++ local 
LAST_OUT=/tmp/tmp.rrChnxQmaa ++++ mktemp +++ local LAST_ERR=/tmp/tmp.h7I5Qnwohz +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec -n monitoring-2-0-17023 monitoring-cfg-0 -c pmm-client -- pmm-admin status --json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.rrChnxQmaa +++ cat /tmp/tmp.h7I5Qnwohz +++ rm /tmp/tmp.rrChnxQmaa /tmp/tmp.h7I5Qnwohz +++ return 0 ++ for instance in $(kubectl_bin get pods --no-headers -l app.kubernetes.io/name=percona-server-mongodb --output=custom-columns='NAME:.metadata.name') ++ nodeList+=($(kubectl_bin exec -n "$namespace" $instance -c pmm-client -- pmm-admin status --json | jq -r '.pmm_agent_status.node_id')) +++ kubectl_bin exec -n monitoring-2-0-17023 monitoring-cfg-1 -c pmm-client -- pmm-admin status --json +++ jq -r .pmm_agent_status.node_id ++++ mktemp +++ local LAST_OUT=/tmp/tmp.ZuTWOdUztf ++++ mktemp +++ local LAST_ERR=/tmp/tmp.4gcK6lIowh +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec -n monitoring-2-0-17023 monitoring-cfg-1 -c pmm-client -- pmm-admin status --json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.ZuTWOdUztf +++ cat /tmp/tmp.4gcK6lIowh +++ rm /tmp/tmp.ZuTWOdUztf /tmp/tmp.4gcK6lIowh +++ return 0 ++ for instance in $(kubectl_bin get pods --no-headers -l app.kubernetes.io/name=percona-server-mongodb --output=custom-columns='NAME:.metadata.name') ++ nodeList+=($(kubectl_bin exec -n "$namespace" $instance -c pmm-client -- pmm-admin status --json | jq -r '.pmm_agent_status.node_id')) +++ kubectl_bin exec -n monitoring-2-0-17023 monitoring-cfg-2 -c pmm-client -- pmm-admin status --json +++ jq -r .pmm_agent_status.node_id ++++ mktemp +++ local LAST_OUT=/tmp/tmp.iir0ndtFc5 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.3IMNYmumrc +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec -n monitoring-2-0-17023 monitoring-cfg-2 -c pmm-client -- pmm-admin status --json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.iir0ndtFc5 +++ cat /tmp/tmp.3IMNYmumrc +++ rm /tmp/tmp.iir0ndtFc5 /tmp/tmp.3IMNYmumrc +++ return 0 ++ for instance in $(kubectl_bin get pods --no-headers -l app.kubernetes.io/name=percona-server-mongodb --output=custom-columns='NAME:.metadata.name') ++ nodeList+=($(kubectl_bin exec -n "$namespace" $instance -c pmm-client -- pmm-admin status --json | jq -r '.pmm_agent_status.node_id')) +++ kubectl_bin exec -n monitoring-2-0-17023 monitoring-mongos-0 -c pmm-client -- pmm-admin status --json +++ jq -r .pmm_agent_status.node_id ++++ mktemp +++ local LAST_OUT=/tmp/tmp.CcQsWmg7e4 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.QsEpUUOE1V +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec -n monitoring-2-0-17023 monitoring-mongos-0 -c pmm-client -- pmm-admin status --json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.CcQsWmg7e4 +++ cat /tmp/tmp.QsEpUUOE1V +++ rm /tmp/tmp.CcQsWmg7e4 /tmp/tmp.QsEpUUOE1V +++ return 0 ++ for instance in $(kubectl_bin get pods --no-headers -l app.kubernetes.io/name=percona-server-mongodb --output=custom-columns='NAME:.metadata.name') ++ nodeList+=($(kubectl_bin exec -n "$namespace" $instance -c pmm-client -- pmm-admin status --json | jq -r '.pmm_agent_status.node_id')) +++ jq -r .pmm_agent_status.node_id +++ kubectl_bin exec -n monitoring-2-0-17023 
monitoring-mongos-1 -c pmm-client -- pmm-admin status --json ++++ mktemp +++ local LAST_OUT=/tmp/tmp.wjeREEp7lZ ++++ mktemp +++ local LAST_ERR=/tmp/tmp.SrVkHygCsQ +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec -n monitoring-2-0-17023 monitoring-mongos-1 -c pmm-client -- pmm-admin status --json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.wjeREEp7lZ +++ cat /tmp/tmp.SrVkHygCsQ +++ rm /tmp/tmp.wjeREEp7lZ /tmp/tmp.SrVkHygCsQ +++ return 0 ++ for instance in $(kubectl_bin get pods --no-headers -l app.kubernetes.io/name=percona-server-mongodb --output=custom-columns='NAME:.metadata.name') ++ nodeList+=($(kubectl_bin exec -n "$namespace" $instance -c pmm-client -- pmm-admin status --json | jq -r '.pmm_agent_status.node_id')) +++ kubectl_bin exec -n monitoring-2-0-17023 monitoring-mongos-2 -c pmm-client -- pmm-admin status --json +++ jq -r .pmm_agent_status.node_id ++++ mktemp +++ local LAST_OUT=/tmp/tmp.kQdHXCYs6Y ++++ mktemp +++ local LAST_ERR=/tmp/tmp.6GtypfFOUp +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec -n monitoring-2-0-17023 monitoring-mongos-2 -c pmm-client -- pmm-admin status --json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.kQdHXCYs6Y +++ cat /tmp/tmp.6GtypfFOUp +++ rm /tmp/tmp.kQdHXCYs6Y /tmp/tmp.6GtypfFOUp +++ return 0 ++ for instance in $(kubectl_bin get pods --no-headers -l app.kubernetes.io/name=percona-server-mongodb --output=custom-columns='NAME:.metadata.name') ++ nodeList+=($(kubectl_bin exec -n "$namespace" $instance -c pmm-client -- pmm-admin status --json | jq -r '.pmm_agent_status.node_id')) +++ kubectl_bin exec -n monitoring-2-0-17023 monitoring-rs0-0 -c pmm-client -- pmm-admin status --json +++ jq -r .pmm_agent_status.node_id ++++ mktemp +++ local LAST_OUT=/tmp/tmp.mGkP3JCte0 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.c00bCmg6ut +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec -n monitoring-2-0-17023 monitoring-rs0-0 -c pmm-client -- pmm-admin status --json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.mGkP3JCte0 +++ cat /tmp/tmp.c00bCmg6ut +++ rm /tmp/tmp.mGkP3JCte0 /tmp/tmp.c00bCmg6ut +++ return 0 ++ for instance in $(kubectl_bin get pods --no-headers -l app.kubernetes.io/name=percona-server-mongodb --output=custom-columns='NAME:.metadata.name') ++ nodeList+=($(kubectl_bin exec -n "$namespace" $instance -c pmm-client -- pmm-admin status --json | jq -r '.pmm_agent_status.node_id')) +++ kubectl_bin exec -n monitoring-2-0-17023 monitoring-rs0-1 -c pmm-client -- pmm-admin status --json +++ jq -r .pmm_agent_status.node_id ++++ mktemp +++ local LAST_OUT=/tmp/tmp.5njrg15Y97 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.pg9nmeFHbH +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec -n monitoring-2-0-17023 monitoring-rs0-1 -c pmm-client -- pmm-admin status --json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.5njrg15Y97 +++ cat /tmp/tmp.pg9nmeFHbH +++ rm /tmp/tmp.5njrg15Y97 /tmp/tmp.pg9nmeFHbH +++ return 0 ++ for instance in $(kubectl_bin get pods --no-headers -l app.kubernetes.io/name=percona-server-mongodb --output=custom-columns='NAME:.metadata.name') ++ nodeList+=($(kubectl_bin exec -n "$namespace" $instance -c pmm-client -- pmm-admin status --json | jq -r '.pmm_agent_status.node_id')) +++ 
kubectl_bin exec -n monitoring-2-0-17023 monitoring-rs0-2 -c pmm-client -- pmm-admin status --json +++ jq -r .pmm_agent_status.node_id ++++ mktemp +++ local LAST_OUT=/tmp/tmp.hfLGUKwnYn ++++ mktemp +++ local LAST_ERR=/tmp/tmp.bHsRIA8cAs +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec -n monitoring-2-0-17023 monitoring-rs0-2 -c pmm-client -- pmm-admin status --json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.hfLGUKwnYn +++ cat /tmp/tmp.bHsRIA8cAs +++ rm /tmp/tmp.hfLGUKwnYn /tmp/tmp.bHsRIA8cAs +++ return 0 ++ echo /node_id/398a3651-0208-4cc8-a227-b92f2f1d5d86 /node_id/d1b20f75-861c-4033-b9ed-b6c2abe6ec32 /node_id/79439308-0102-4453-941b-2eb125a06403 /node_id/b3dd91af-88b9-42f9-a5d6-04a0cafde469 /node_id/d955be85-c451-46c2-be07-65c7b0211635 /node_id/51874d3f-b992-4b5b-9318-de3132e782db /node_id/0b8931af-a766-4468-90bf-6e8342cc1813 /node_id/06acfbc6-463b-4128-a5bd-df9da9977e01 /node_id/948516c7-32cb-4b4f-bed1-44a5475ca06d + nodeList_from_pmm=($(does_node_id_exists "${nodeList[@]}")) ++ does_node_id_exists /node_id/398a3651-0208-4cc8-a227-b92f2f1d5d86 /node_id/d1b20f75-861c-4033-b9ed-b6c2abe6ec32 /node_id/79439308-0102-4453-941b-2eb125a06403 /node_id/b3dd91af-88b9-42f9-a5d6-04a0cafde469 /node_id/d955be85-c451-46c2-be07-65c7b0211635 /node_id/51874d3f-b992-4b5b-9318-de3132e782db /node_id/0b8931af-a766-4468-90bf-6e8342cc1813 /node_id/06acfbc6-463b-4128-a5bd-df9da9977e01 /node_id/948516c7-32cb-4b4f-bed1-44a5475ca06d ++ nodeList=('/node_id/398a3651-0208-4cc8-a227-b92f2f1d5d86' '/node_id/d1b20f75-861c-4033-b9ed-b6c2abe6ec32' '/node_id/79439308-0102-4453-941b-2eb125a06403' '/node_id/b3dd91af-88b9-42f9-a5d6-04a0cafde469' '/node_id/d955be85-c451-46c2-be07-65c7b0211635' '/node_id/51874d3f-b992-4b5b-9318-de3132e782db' '/node_id/0b8931af-a766-4468-90bf-6e8342cc1813' '/node_id/06acfbc6-463b-4128-a5bd-df9da9977e01' '/node_id/948516c7-32cb-4b4f-bed1-44a5475ca06d') ++ local -a nodeList ++ nodeList_from_pmm=() ++ local -a nodeList_from_pmm ++ for node_id in "${nodeList[@]}" ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-0 -- pmm-admin --server-url=https://admin:admin@$(get_pmm_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ grep /node_id/398a3651-0208-4cc8-a227-b92f2f1d5d86 +++ awk '{print $4}' ++++ get_pmm_service_ip monitoring-service ++++ local service=monitoring-service ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.spec.type}' ++++ grep -q NotFound ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[]}' ++++ grep -E -q 'hostname|ip' ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.APJRceAN1f +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.PC2x08P3Vu ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.APJRceAN1f ++++ cat /tmp/tmp.PC2x08P3Vu ++++ rm /tmp/tmp.APJRceAN1f /tmp/tmp.PC2x08P3Vu ++++ return 0 ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.LKMdi3Scm3 +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.Ce4IIxF4UC ++++ local 
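The node-ID harvest above follows this shape (a sketch of get_node_id_from_pmm under the same namespace and pod label shown in the trace):

# Ask each pmm-client sidecar which node_id its pmm-agent registered
# with the PMM server; collect one /node_id/<uuid> per pod.
nodeList=()
for pod in $(kubectl get pods -n "$namespace" --no-headers \
    -l app.kubernetes.io/name=percona-server-mongodb \
    -o custom-columns='NAME:.metadata.name'); do
  nodeList+=("$(kubectl exec -n "$namespace" "$pod" -c pmm-client -- \
    pmm-admin status --json | jq -r '.pmm_agent_status.node_id')")
done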
+ nodeList_from_pmm=($(does_node_id_exists "${nodeList[@]}"))
++ does_node_id_exists /node_id/398a3651-0208-4cc8-a227-b92f2f1d5d86 /node_id/d1b20f75-861c-4033-b9ed-b6c2abe6ec32 /node_id/79439308-0102-4453-941b-2eb125a06403 /node_id/b3dd91af-88b9-42f9-a5d6-04a0cafde469 /node_id/d955be85-c451-46c2-be07-65c7b0211635 /node_id/51874d3f-b992-4b5b-9318-de3132e782db /node_id/0b8931af-a766-4468-90bf-6e8342cc1813 /node_id/06acfbc6-463b-4128-a5bd-df9da9977e01 /node_id/948516c7-32cb-4b4f-bed1-44a5475ca06d
++ nodeList=('/node_id/398a3651-0208-4cc8-a227-b92f2f1d5d86' '/node_id/d1b20f75-861c-4033-b9ed-b6c2abe6ec32' '/node_id/79439308-0102-4453-941b-2eb125a06403' '/node_id/b3dd91af-88b9-42f9-a5d6-04a0cafde469' '/node_id/d955be85-c451-46c2-be07-65c7b0211635' '/node_id/51874d3f-b992-4b5b-9318-de3132e782db' '/node_id/0b8931af-a766-4468-90bf-6e8342cc1813' '/node_id/06acfbc6-463b-4128-a5bd-df9da9977e01' '/node_id/948516c7-32cb-4b4f-bed1-44a5475ca06d')
++ local -a nodeList
++ nodeList_from_pmm=()
++ local -a nodeList_from_pmm
++ for node_id in "${nodeList[@]}"
++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-0 -- pmm-admin --server-url=https://admin:admin@$(get_pmm_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}'))
+++ grep /node_id/398a3651-0208-4cc8-a227-b92f2f1d5d86
+++ awk '{print $4}'
++++ get_pmm_service_ip monitoring-service
++++ local service=monitoring-service
++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.spec.type}'
++++ grep -q NotFound
++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[]}'
++++ grep -E -q 'hostname|ip'
++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}'
++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}'
+++ kubectl_bin exec -n monitoring-2-0-17023 monitoring-0 -- pmm-admin --server-url=https://admin:admin@34.27.251.120/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE
<the same grep | awk | get_pmm_service_ip | pmm-admin inventory list sequence repeats for /node_id/d1b20f75-861c-4033-b9ed-b6c2abe6ec32, /node_id/79439308-0102-4453-941b-2eb125a06403, /node_id/b3dd91af-88b9-42f9-a5d6-04a0cafde469, /node_id/d955be85-c451-46c2-be07-65c7b0211635, /node_id/51874d3f-b992-4b5b-9318-de3132e782db, /node_id/0b8931af-a766-4468-90bf-6e8342cc1813, /node_id/06acfbc6-463b-4128-a5bd-df9da9977e01, and /node_id/948516c7-32cb-4b4f-bed1-44a5475ca06d; this log fragment ends mid-way through the last lookup>
/tmp/tmp.AfyXp3HM48
++++ rm /tmp/tmp.cWXXCOJ3pG /tmp/tmp.AfyXp3HM48
++++ return 0
+++ kubectl_bin exec -n monitoring-2-0-17023 monitoring-0 -- pmm-admin --server-url=https://admin:admin@34.27.251.120/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE
++++ mktemp
+++ local LAST_OUT=/tmp/tmp.zs31dnWuYc
++++ mktemp
+++ local LAST_ERR=/tmp/tmp.9HjzezqjSe
+++ local exit_status=0
+++ local timeout=4
++++ seq 0 2
+++ for i in $(seq 0 2)
+++ set +e
+++ kubectl exec -n monitoring-2-0-17023 monitoring-0 -- pmm-admin --server-url=https://admin:admin@34.27.251.120/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE
+++ exit_status=0
+++ set -e
+++ '[' 0 '!=' 0 -a -n 1 ']'
+++ break
+++ cat /tmp/tmp.zs31dnWuYc
+++ cat /tmp/tmp.9HjzezqjSe
+++ rm /tmp/tmp.zs31dnWuYc /tmp/tmp.9HjzezqjSe
+++ return 0
++ echo /node_id/398a3651-0208-4cc8-a227-b92f2f1d5d86 /node_id/d1b20f75-861c-4033-b9ed-b6c2abe6ec32 /node_id/79439308-0102-4453-941b-2eb125a06403 /node_id/b3dd91af-88b9-42f9-a5d6-04a0cafde469 /node_id/d955be85-c451-46c2-be07-65c7b0211635 /node_id/51874d3f-b992-4b5b-9318-de3132e782db /node_id/0b8931af-a766-4468-90bf-6e8342cc1813 /node_id/06acfbc6-463b-4128-a5bd-df9da9977e01 /node_id/948516c7-32cb-4b4f-bed1-44a5475ca06d
+ for node_id in "${nodeList_from_pmm[@]}"
+ '[' -z /node_id/398a3651-0208-4cc8-a227-b92f2f1d5d86 ']'
+ for node_id in "${nodeList_from_pmm[@]}"
+ '[' -z /node_id/d1b20f75-861c-4033-b9ed-b6c2abe6ec32 ']'
+ for node_id in "${nodeList_from_pmm[@]}"
+ '[' -z /node_id/79439308-0102-4453-941b-2eb125a06403 ']'
+ for node_id in "${nodeList_from_pmm[@]}"
+ '[' -z /node_id/b3dd91af-88b9-42f9-a5d6-04a0cafde469 ']'
+ for node_id in "${nodeList_from_pmm[@]}"
+ '[' -z /node_id/d955be85-c451-46c2-be07-65c7b0211635 ']'
+ for node_id in "${nodeList_from_pmm[@]}"
+ '[' -z /node_id/51874d3f-b992-4b5b-9318-de3132e782db ']'
+ for node_id in "${nodeList_from_pmm[@]}"
+ '[' -z /node_id/0b8931af-a766-4468-90bf-6e8342cc1813 ']'
+ for node_id in "${nodeList_from_pmm[@]}"
+ '[' -z /node_id/06acfbc6-463b-4128-a5bd-df9da9977e01 ']'
+ for node_id in "${nodeList_from_pmm[@]}"
+ '[' -z /node_id/948516c7-32cb-4b4f-bed1-44a5475ca06d ']'
+ kubectl_bin patch psmdb monitoring --type json '-p=[{"op":"add","path":"/spec/pause","value":true}]'
++ mktemp
+ local LAST_OUT=/tmp/tmp.IqtQJvtzmP
++ mktemp
+ local LAST_ERR=/tmp/tmp.nJGJ7U67TP
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ kubectl patch psmdb monitoring --type json '-p=[{"op":"add","path":"/spec/pause","value":true}]'
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.IqtQJvtzmP
perconaservermongodb.psmdb.percona.com/monitoring patched
+ cat /tmp/tmp.nJGJ7U67TP
+ rm /tmp/tmp.IqtQJvtzmP /tmp/tmp.nJGJ7U67TP
+ return 0
+ wait_for_delete pod/monitoring-mongos-0
+ local res=pod/monitoring-mongos-0
+ local wait_time=60
+ set +o xtrace
waiting for pod/monitoring-mongos-0 to be deleted...........................Error from server (NotFound): pods "monitoring-mongos-0" not found
Error from server (NotFound): pods "monitoring-mongos-0" not found
Error from server (NotFound): pods "monitoring-mongos-0" not found
Error from server (NotFound): pods "monitoring-mongos-0" not found
+ wait_for_delete pod/monitoring-rs0-0
+ local res=pod/monitoring-rs0-0
+ local wait_time=60
+ set +o xtrace
waiting for pod/monitoring-rs0-0 to be deleted...........Error from server (NotFound): pods "monitoring-rs0-0" not found
Error from server (NotFound): pods "monitoring-rs0-0" not found
Error from server (NotFound): pods "monitoring-rs0-0" not found
Error from server (NotFound): pods "monitoring-rs0-0" not found
+ wait_for_delete pod/monitoring-cfg-0
+ local res=pod/monitoring-cfg-0
+ local wait_time=60
+ set +o xtrace
waiting for pod/monitoring-cfg-0 to be deleted.........Error from server (NotFound): pods "monitoring-cfg-0" not found
Error from server (NotFound): pods "monitoring-cfg-0" not found
Error from server (NotFound): pods "monitoring-cfg-0" not found
Error from server (NotFound): pods "monitoring-cfg-0" not found
+ desc 'check if services are not deleted'
+ set +o xtrace
-----------------------------------------------------------------------------------
check if services are not deleted
-----------------------------------------------------------------------------------
+ kubectl_bin get svc monitoring-rs0
++ mktemp
+ local LAST_OUT=/tmp/tmp.fBZqhiKjf6
++ mktemp
+ local LAST_ERR=/tmp/tmp.la08BGzV7H
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ kubectl get svc monitoring-rs0
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.fBZqhiKjf6
NAME             TYPE        CLUSTER-IP   EXTERNAL-IP   PORT(S)     AGE
monitoring-rs0   ClusterIP   None         <none>        27019/TCP   15m
+ cat /tmp/tmp.la08BGzV7H
+ rm /tmp/tmp.fBZqhiKjf6 /tmp/tmp.la08BGzV7H
+ return 0
+ kubectl_bin get svc monitoring-cfg
++ mktemp
+ local LAST_OUT=/tmp/tmp.elVLUTPtEQ
++ mktemp
+ local LAST_ERR=/tmp/tmp.omYFLdjSSj
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ kubectl get svc monitoring-cfg
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.elVLUTPtEQ
NAME             TYPE        CLUSTER-IP   EXTERNAL-IP   PORT(S)     AGE
monitoring-cfg   ClusterIP   None         <none>        27019/TCP   15m
+ cat /tmp/tmp.omYFLdjSSj
+ rm /tmp/tmp.elVLUTPtEQ /tmp/tmp.omYFLdjSSj
+ return 0
+ kubectl_bin get svc monitoring-mongos
++ mktemp
+ local LAST_OUT=/tmp/tmp.hxxx08az1f
++ mktemp
+ local LAST_ERR=/tmp/tmp.deY41YNxlJ
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ kubectl get svc monitoring-mongos
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.hxxx08az1f
NAME                TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)     AGE
monitoring-mongos   ClusterIP   34.118.226.62   <none>        27019/TCP   15m
+ cat /tmp/tmp.deY41YNxlJ
+ rm /tmp/tmp.hxxx08az1f /tmp/tmp.deY41YNxlJ
+ return 0
+ does_node_id_exists_in_pmm=($(does_node_id_exists "${nodeList[@]}"))
++ does_node_id_exists /node_id/398a3651-0208-4cc8-a227-b92f2f1d5d86 /node_id/d1b20f75-861c-4033-b9ed-b6c2abe6ec32 /node_id/79439308-0102-4453-941b-2eb125a06403 /node_id/b3dd91af-88b9-42f9-a5d6-04a0cafde469 /node_id/d955be85-c451-46c2-be07-65c7b0211635 /node_id/51874d3f-b992-4b5b-9318-de3132e782db /node_id/0b8931af-a766-4468-90bf-6e8342cc1813 /node_id/06acfbc6-463b-4128-a5bd-df9da9977e01 /node_id/948516c7-32cb-4b4f-bed1-44a5475ca06d
++ nodeList=('/node_id/398a3651-0208-4cc8-a227-b92f2f1d5d86' '/node_id/d1b20f75-861c-4033-b9ed-b6c2abe6ec32' '/node_id/79439308-0102-4453-941b-2eb125a06403' '/node_id/b3dd91af-88b9-42f9-a5d6-04a0cafde469' '/node_id/d955be85-c451-46c2-be07-65c7b0211635' '/node_id/51874d3f-b992-4b5b-9318-de3132e782db' '/node_id/0b8931af-a766-4468-90bf-6e8342cc1813' '/node_id/06acfbc6-463b-4128-a5bd-df9da9977e01' '/node_id/948516c7-32cb-4b4f-bed1-44a5475ca06d')
++ local -a nodeList
++ nodeList_from_pmm=()
++ local -a nodeList_from_pmm
++ for node_id in "${nodeList[@]}"
++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-0 -- pmm-admin --server-url=https://admin:admin@$(get_pmm_service_ip monitoring-service)/
--server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ grep /node_id/398a3651-0208-4cc8-a227-b92f2f1d5d86 +++ awk '{print $4}' ++++ get_pmm_service_ip monitoring-service ++++ local service=monitoring-service ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.spec.type}' ++++ grep -q NotFound ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[]}' ++++ grep -E -q 'hostname|ip' ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.OkJBw8v4fM +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.1CWUF6ZrLl ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.OkJBw8v4fM ++++ cat /tmp/tmp.1CWUF6ZrLl ++++ rm /tmp/tmp.OkJBw8v4fM /tmp/tmp.1CWUF6ZrLl ++++ return 0 ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.av5aRT0lq2 +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.QBaxOZcN24 ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.av5aRT0lq2 ++++ cat /tmp/tmp.QBaxOZcN24 ++++ rm /tmp/tmp.av5aRT0lq2 /tmp/tmp.QBaxOZcN24 ++++ return 0 +++ kubectl_bin exec -n monitoring-2-0-17023 monitoring-0 -- pmm-admin --server-url=https://admin:admin@34.27.251.120/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.7jg5THu0Rl ++++ mktemp +++ local LAST_ERR=/tmp/tmp.UKpzZuy3Wl +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec -n monitoring-2-0-17023 monitoring-0 -- pmm-admin --server-url=https://admin:admin@34.27.251.120/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.7jg5THu0Rl +++ cat /tmp/tmp.UKpzZuy3Wl +++ rm /tmp/tmp.7jg5THu0Rl /tmp/tmp.UKpzZuy3Wl +++ return 0 ++ for node_id in "${nodeList[@]}" ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-0 -- pmm-admin --server-url=https://admin:admin@$(get_pmm_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ grep /node_id/d1b20f75-861c-4033-b9ed-b6c2abe6ec32 +++ awk '{print $4}' ++++ get_pmm_service_ip monitoring-service ++++ local service=monitoring-service ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.spec.type}' ++++ grep -q NotFound ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[]}' ++++ grep -E -q 'hostname|ip' ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.TcH34Fyc1h +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.5v1BDcYqCE ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a 
-n 1 ']' ++++ break ++++ cat /tmp/tmp.TcH34Fyc1h ++++ cat /tmp/tmp.5v1BDcYqCE ++++ rm /tmp/tmp.TcH34Fyc1h /tmp/tmp.5v1BDcYqCE ++++ return 0 ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.ETBFJnUz0V +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.lllo5M6O4z ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.ETBFJnUz0V ++++ cat /tmp/tmp.lllo5M6O4z ++++ rm /tmp/tmp.ETBFJnUz0V /tmp/tmp.lllo5M6O4z ++++ return 0 +++ kubectl_bin exec -n monitoring-2-0-17023 monitoring-0 -- pmm-admin --server-url=https://admin:admin@34.27.251.120/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.roiuFOq1v6 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.OiDNOS2sVK +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec -n monitoring-2-0-17023 monitoring-0 -- pmm-admin --server-url=https://admin:admin@34.27.251.120/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.roiuFOq1v6 +++ cat /tmp/tmp.OiDNOS2sVK +++ rm /tmp/tmp.roiuFOq1v6 /tmp/tmp.OiDNOS2sVK +++ return 0 ++ for node_id in "${nodeList[@]}" ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-0 -- pmm-admin --server-url=https://admin:admin@$(get_pmm_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ grep /node_id/79439308-0102-4453-941b-2eb125a06403 +++ awk '{print $4}' ++++ get_pmm_service_ip monitoring-service ++++ local service=monitoring-service ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.spec.type}' ++++ grep -q NotFound ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[]}' ++++ grep -E -q 'hostname|ip' ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.hwI5R8ZxfH +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.QmsHlXBNcZ ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.hwI5R8ZxfH ++++ cat /tmp/tmp.QmsHlXBNcZ ++++ rm /tmp/tmp.hwI5R8ZxfH /tmp/tmp.QmsHlXBNcZ ++++ return 0 ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.9doUu8x2vO +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.1sGgWCjbmh ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.9doUu8x2vO ++++ cat /tmp/tmp.1sGgWCjbmh ++++ rm /tmp/tmp.9doUu8x2vO /tmp/tmp.1sGgWCjbmh ++++ return 0 +++ kubectl_bin exec -n monitoring-2-0-17023 monitoring-0 -- pmm-admin --server-url=https://admin:admin@34.27.251.120/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ 
mktemp +++ local LAST_OUT=/tmp/tmp.deORWXx8RS ++++ mktemp +++ local LAST_ERR=/tmp/tmp.6FeCb6i5PY +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec -n monitoring-2-0-17023 monitoring-0 -- pmm-admin --server-url=https://admin:admin@34.27.251.120/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.deORWXx8RS +++ cat /tmp/tmp.6FeCb6i5PY +++ rm /tmp/tmp.deORWXx8RS /tmp/tmp.6FeCb6i5PY +++ return 0 ++ for node_id in "${nodeList[@]}" ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-0 -- pmm-admin --server-url=https://admin:admin@$(get_pmm_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ grep /node_id/b3dd91af-88b9-42f9-a5d6-04a0cafde469 ++++ get_pmm_service_ip monitoring-service +++ awk '{print $4}' ++++ local service=monitoring-service ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.spec.type}' ++++ grep -q NotFound ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[]}' ++++ grep -E -q 'hostname|ip' ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.TfRLj8wNdv +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.KhmjhBTnve ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.TfRLj8wNdv ++++ cat /tmp/tmp.KhmjhBTnve ++++ rm /tmp/tmp.TfRLj8wNdv /tmp/tmp.KhmjhBTnve ++++ return 0 ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.iLWlCnXSYD +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.NGfCcwqxWz ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.iLWlCnXSYD ++++ cat /tmp/tmp.NGfCcwqxWz ++++ rm /tmp/tmp.iLWlCnXSYD /tmp/tmp.NGfCcwqxWz ++++ return 0 +++ kubectl_bin exec -n monitoring-2-0-17023 monitoring-0 -- pmm-admin --server-url=https://admin:admin@34.27.251.120/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.MpLYbFPKLT ++++ mktemp +++ local LAST_ERR=/tmp/tmp.mJ16LzKSbD +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec -n monitoring-2-0-17023 monitoring-0 -- pmm-admin --server-url=https://admin:admin@34.27.251.120/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.MpLYbFPKLT +++ cat /tmp/tmp.mJ16LzKSbD +++ rm /tmp/tmp.MpLYbFPKLT /tmp/tmp.mJ16LzKSbD +++ return 0 ++ for node_id in "${nodeList[@]}" ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-0 -- pmm-admin --server-url=https://admin:admin@$(get_pmm_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ grep /node_id/d955be85-c451-46c2-be07-65c7b0211635 +++ awk '{print 
$4}' ++++ get_pmm_service_ip monitoring-service ++++ local service=monitoring-service ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.spec.type}' ++++ grep -q NotFound ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[]}' ++++ grep -E -q 'hostname|ip' ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.LQrnhsFyDv +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.E4yLELx9hu ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.LQrnhsFyDv ++++ cat /tmp/tmp.E4yLELx9hu ++++ rm /tmp/tmp.LQrnhsFyDv /tmp/tmp.E4yLELx9hu ++++ return 0 ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.7yH0uUx1MK +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.K6sdoHDQML ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.7yH0uUx1MK ++++ cat /tmp/tmp.K6sdoHDQML ++++ rm /tmp/tmp.7yH0uUx1MK /tmp/tmp.K6sdoHDQML ++++ return 0 +++ kubectl_bin exec -n monitoring-2-0-17023 monitoring-0 -- pmm-admin --server-url=https://admin:admin@34.27.251.120/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.V3yR3xNWjj ++++ mktemp +++ local LAST_ERR=/tmp/tmp.mBPLz5VATI +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec -n monitoring-2-0-17023 monitoring-0 -- pmm-admin --server-url=https://admin:admin@34.27.251.120/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.V3yR3xNWjj +++ cat /tmp/tmp.mBPLz5VATI +++ rm /tmp/tmp.V3yR3xNWjj /tmp/tmp.mBPLz5VATI +++ return 0 ++ for node_id in "${nodeList[@]}" ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-0 -- pmm-admin --server-url=https://admin:admin@$(get_pmm_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ grep /node_id/51874d3f-b992-4b5b-9318-de3132e782db +++ awk '{print $4}' ++++ get_pmm_service_ip monitoring-service ++++ local service=monitoring-service ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.spec.type}' ++++ grep -q NotFound ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[]}' ++++ grep -E -q 'hostname|ip' ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.vBCIcb9dnJ +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.Kqt6U2rZx9 ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.vBCIcb9dnJ ++++ cat /tmp/tmp.Kqt6U2rZx9 ++++ rm /tmp/tmp.vBCIcb9dnJ /tmp/tmp.Kqt6U2rZx9 ++++ return 0 ++++ kubectl_bin get 
service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.oEt2azZUu5 +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.TzAjhlLgJG ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.oEt2azZUu5 ++++ cat /tmp/tmp.TzAjhlLgJG ++++ rm /tmp/tmp.oEt2azZUu5 /tmp/tmp.TzAjhlLgJG ++++ return 0 +++ kubectl_bin exec -n monitoring-2-0-17023 monitoring-0 -- pmm-admin --server-url=https://admin:admin@34.27.251.120/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.rITl5TRXCV ++++ mktemp +++ local LAST_ERR=/tmp/tmp.R54Vkhdyn3 +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec -n monitoring-2-0-17023 monitoring-0 -- pmm-admin --server-url=https://admin:admin@34.27.251.120/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.rITl5TRXCV +++ cat /tmp/tmp.R54Vkhdyn3 +++ rm /tmp/tmp.rITl5TRXCV /tmp/tmp.R54Vkhdyn3 +++ return 0 ++ for node_id in "${nodeList[@]}" ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-0 -- pmm-admin --server-url=https://admin:admin@$(get_pmm_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ grep /node_id/0b8931af-a766-4468-90bf-6e8342cc1813 ++++ get_pmm_service_ip monitoring-service ++++ local service=monitoring-service ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.spec.type}' ++++ grep -q NotFound +++ awk '{print $4}' ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[]}' ++++ grep -E -q 'hostname|ip' ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.FhqKaeUSKr +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.3xAbb40IYt ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.FhqKaeUSKr ++++ cat /tmp/tmp.3xAbb40IYt ++++ rm /tmp/tmp.FhqKaeUSKr /tmp/tmp.3xAbb40IYt ++++ return 0 ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.vOMSZ2gCfE +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.64jlQWbtJq ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.vOMSZ2gCfE ++++ cat /tmp/tmp.64jlQWbtJq ++++ rm /tmp/tmp.vOMSZ2gCfE /tmp/tmp.64jlQWbtJq ++++ return 0 +++ kubectl_bin exec -n monitoring-2-0-17023 monitoring-0 -- pmm-admin --server-url=https://admin:admin@34.27.251.120/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.bHfa9nvcgr ++++ mktemp +++ local LAST_ERR=/tmp/tmp.tsuWJyx1YZ +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for 
i in $(seq 0 2) +++ set +e +++ kubectl exec -n monitoring-2-0-17023 monitoring-0 -- pmm-admin --server-url=https://admin:admin@34.27.251.120/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.bHfa9nvcgr +++ cat /tmp/tmp.tsuWJyx1YZ +++ rm /tmp/tmp.bHfa9nvcgr /tmp/tmp.tsuWJyx1YZ +++ return 0 ++ for node_id in "${nodeList[@]}" ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-0 -- pmm-admin --server-url=https://admin:admin@$(get_pmm_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ grep /node_id/06acfbc6-463b-4128-a5bd-df9da9977e01 +++ awk '{print $4}' ++++ get_pmm_service_ip monitoring-service ++++ local service=monitoring-service ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.spec.type}' ++++ grep -q NotFound ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[]}' ++++ grep -E -q 'hostname|ip' ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.EiqVtHLRxU +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.Bj2z0Ee0RF ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.EiqVtHLRxU ++++ cat /tmp/tmp.Bj2z0Ee0RF ++++ rm /tmp/tmp.EiqVtHLRxU /tmp/tmp.Bj2z0Ee0RF ++++ return 0 ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.7MvckQ5ebN +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.GdTvz9fTbK ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.7MvckQ5ebN ++++ cat /tmp/tmp.GdTvz9fTbK ++++ rm /tmp/tmp.7MvckQ5ebN /tmp/tmp.GdTvz9fTbK ++++ return 0 +++ kubectl_bin exec -n monitoring-2-0-17023 monitoring-0 -- pmm-admin --server-url=https://admin:admin@34.27.251.120/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.DOpZ0zAeTa ++++ mktemp +++ local LAST_ERR=/tmp/tmp.9IH27auyNy +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec -n monitoring-2-0-17023 monitoring-0 -- pmm-admin --server-url=https://admin:admin@34.27.251.120/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.DOpZ0zAeTa +++ cat /tmp/tmp.9IH27auyNy +++ rm /tmp/tmp.DOpZ0zAeTa /tmp/tmp.9IH27auyNy +++ return 0 ++ for node_id in "${nodeList[@]}" ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-0 -- pmm-admin --server-url=https://admin:admin@$(get_pmm_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ grep /node_id/948516c7-32cb-4b4f-bed1-44a5475ca06d +++ awk '{print $4}' ++++ get_pmm_service_ip monitoring-service ++++ local service=monitoring-service ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.spec.type}' 
++++ grep -q NotFound
++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[]}'
++++ grep -E -q 'hostname|ip'
++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}'
+++++ mktemp
++++ local LAST_OUT=/tmp/tmp.CiV8DjcasT
+++++ mktemp
++++ local LAST_ERR=/tmp/tmp.EylNuCHqZl
++++ local exit_status=0
++++ local timeout=4
+++++ seq 0 2
++++ for i in $(seq 0 2)
++++ set +e
++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}'
++++ exit_status=0
++++ set -e
++++ '[' 0 '!=' 0 -a -n 1 ']'
++++ break
++++ cat /tmp/tmp.CiV8DjcasT
++++ cat /tmp/tmp.EylNuCHqZl
++++ rm /tmp/tmp.CiV8DjcasT /tmp/tmp.EylNuCHqZl
++++ return 0
++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}'
+++++ mktemp
++++ local LAST_OUT=/tmp/tmp.G2kjZiS8AB
+++++ mktemp
++++ local LAST_ERR=/tmp/tmp.rErBusp1I9
++++ local exit_status=0
++++ local timeout=4
+++++ seq 0 2
++++ for i in $(seq 0 2)
++++ set +e
++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}'
++++ exit_status=0
++++ set -e
++++ '[' 0 '!=' 0 -a -n 1 ']'
++++ break
++++ cat /tmp/tmp.G2kjZiS8AB
++++ cat /tmp/tmp.rErBusp1I9
++++ rm /tmp/tmp.G2kjZiS8AB /tmp/tmp.rErBusp1I9
++++ return 0
+++ kubectl_bin exec -n monitoring-2-0-17023 monitoring-0 -- pmm-admin --server-url=https://admin:admin@34.27.251.120/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE
++++ mktemp
+++ local LAST_OUT=/tmp/tmp.ksMfo9BJcV
++++ mktemp
+++ local LAST_ERR=/tmp/tmp.kQFJMhUfYV
+++ local exit_status=0
+++ local timeout=4
++++ seq 0 2
+++ for i in $(seq 0 2)
+++ set +e
+++ kubectl exec -n monitoring-2-0-17023 monitoring-0 -- pmm-admin --server-url=https://admin:admin@34.27.251.120/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE
+++ exit_status=0
+++ set -e
+++ '[' 0 '!=' 0 -a -n 1 ']'
+++ break
+++ cat /tmp/tmp.ksMfo9BJcV
+++ cat /tmp/tmp.kQFJMhUfYV
+++ rm /tmp/tmp.ksMfo9BJcV /tmp/tmp.kQFJMhUfYV
+++ return 0
++ echo
+ desc 'check customClusterName for pmm'
+ set +o xtrace
-----------------------------------------------------------------------------------
check customClusterName for pmm
-----------------------------------------------------------------------------------
+ custom_name=custom-cluster-name
+ kubectl_bin patch psmdb monitoring --type json '-p=[{"op":"add","path":"/spec/pause","value":false}, {"op":"add","path":"/spec/pmm/customClusterName","value":custom-cluster-name}]'
++ mktemp
+ local LAST_OUT=/tmp/tmp.li0HuOIgqU
++ mktemp
+ local LAST_ERR=/tmp/tmp.a0IlahYMTd
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ kubectl patch psmdb monitoring --type json '-p=[{"op":"add","path":"/spec/pause","value":false}, {"op":"add","path":"/spec/pmm/customClusterName","value":custom-cluster-name}]'
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.li0HuOIgqU
perconaservermongodb.psmdb.percona.com/monitoring patched
+ cat /tmp/tmp.a0IlahYMTd
+ rm /tmp/tmp.li0HuOIgqU /tmp/tmp.a0IlahYMTd
+ return 0
+ wait_for_running monitoring-rs0 3
+ local name=monitoring-rs0
+ let last_pod=2
+ local check_cluster_readyness=true
+ set_debug
+ [[ 1 == 1 ]]
+ set -o xtrace
+ local rs_name=rs0
+ local cluster_name=monitoring
++ seq 0 2
+ for i in $(seq 0 $last_pod)
+ [[ 0 -eq 2 ]]
+ wait_pod monitoring-rs0-0
+ local pod=monitoring-rs0-0
+ set +o xtrace
waiting for pod/monitoring-rs0-0 to be ready..........OK
+ for i in $(seq 0 $last_pod)
+ [[ 1 -eq 2 ]]
+ wait_pod monitoring-rs0-1
+ local pod=monitoring-rs0-1
+ set +o xtrace
waiting for pod/monitoring-rs0-1 to be ready...........OK
+ for i in $(seq 0 $last_pod)
+ [[ 2 -eq 2 ]]
++ kubectl_bin get psmdb monitoring -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}'
+++ mktemp
++ local LAST_OUT=/tmp/tmp.GML4mvkIkV
+++ mktemp
++ local LAST_ERR=/tmp/tmp.9QegWrHmwV
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in $(seq 0 2)
++ set +e
++ kubectl get psmdb monitoring -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}'
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 -a -n 1 ']'
++ break
++ cat /tmp/tmp.GML4mvkIkV
++ cat /tmp/tmp.9QegWrHmwV
++ rm /tmp/tmp.GML4mvkIkV /tmp/tmp.9QegWrHmwV
++ return 0
+ [[ '' == \t\r\u\e ]]
+ wait_pod monitoring-rs0-2
+ local pod=monitoring-rs0-2
+ set +o xtrace
waiting for pod/monitoring-rs0-2 to be ready.............OK
++ kubectl_bin get psmdb monitoring -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}'
+++ mktemp
++ local LAST_OUT=/tmp/tmp.PCnjcKHtE1
+++ mktemp
++ local LAST_ERR=/tmp/tmp.QjfLvgOLfj
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in $(seq 0 2)
++ set +e
++ kubectl get psmdb monitoring -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}'
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 -a -n 1 ']'
++ break
++ cat /tmp/tmp.PCnjcKHtE1
++ cat /tmp/tmp.QjfLvgOLfj
++ rm /tmp/tmp.PCnjcKHtE1 /tmp/tmp.QjfLvgOLfj
++ return 0
+ [[ '' == \t\r\u\e ]]
++ kubectl_bin get psmdb monitoring -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}'
+++ mktemp
++ local LAST_OUT=/tmp/tmp.3o0YauIMcq
+++ mktemp
++ local LAST_ERR=/tmp/tmp.sdQbwuxLGR
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in $(seq 0 2)
++ set +e
++ kubectl get psmdb monitoring -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}'
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 -a -n 1 ']'
++ break
++ cat /tmp/tmp.3o0YauIMcq
++ cat /tmp/tmp.sdQbwuxLGR
++ rm /tmp/tmp.3o0YauIMcq /tmp/tmp.sdQbwuxLGR
++ return 0
+ [[ '' == \t\r\u\e ]]
+ sleep 10
+ [[ true == \t\r\u\e ]]
+ set +x
Waiting for cluster readyness.......
++ get_service_endpoint monitoring-service ++ local service=monitoring-service +++ kubectl_bin get service/monitoring-service -o json +++ jq '.status.loadBalancer.ingress[].hostname' +++ sed -e 's/^"//; s/"$//;' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.cxHMRp9aTX ++++ mktemp +++ local LAST_ERR=/tmp/tmp.fcFpb0sgxM +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.cxHMRp9aTX +++ cat /tmp/tmp.fcFpb0sgxM +++ rm /tmp/tmp.cxHMRp9aTX /tmp/tmp.fcFpb0sgxM +++ return 0 ++ local hostname=null ++ '[' -n null -a null '!=' null ']' +++ kubectl_bin get service/monitoring-service -o json +++ jq '.status.loadBalancer.ingress[].ip' +++ sed -e 's/^"//; s/"$//;' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.ieYIkTm1tq ++++ mktemp +++ local LAST_ERR=/tmp/tmp.xHBU2vAtsZ +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.ieYIkTm1tq +++ cat /tmp/tmp.xHBU2vAtsZ +++ rm /tmp/tmp.ieYIkTm1tq /tmp/tmp.xHBU2vAtsZ +++ return 0 ++ local ip=34.27.251.120 ++ '[' -n 34.27.251.120 -a 34.27.251.120 '!=' null ']' ++ echo 34.27.251.120 ++ return + curl -s -k -d '{"service_type":"MONGODB_SERVICE"}' https://admin:admin@34.27.251.120/v1/inventory/Services/List + check_custom_cluster_name monitoring-2-0-17023-monitoring-mongos-0 /tmp/tmp.Nkr2LYGrdi/pmm_service_list.json + local pod_service_name=monitoring-2-0-17023-monitoring-mongos-0 + local pmm_services_file=/tmp/tmp.Nkr2LYGrdi/pmm_service_list.json + echo 'Checking monitoring-2-0-17023-monitoring-mongos-0' Checking monitoring-2-0-17023-monitoring-mongos-0 ++ jq -r '.mongodb[] | select(.service_name=="monitoring-2-0-17023-monitoring-mongos-0") | .cluster' /tmp/tmp.Nkr2LYGrdi/pmm_service_list.json + pmm_service_cluster=custom-cluster-name + [[ custom-cluster-name != custom-cluster-name ]] + check_custom_cluster_name monitoring-2-0-17023-monitoring-rs0-0 /tmp/tmp.Nkr2LYGrdi/pmm_service_list.json + local pod_service_name=monitoring-2-0-17023-monitoring-rs0-0 + local pmm_services_file=/tmp/tmp.Nkr2LYGrdi/pmm_service_list.json + echo 'Checking monitoring-2-0-17023-monitoring-rs0-0' Checking monitoring-2-0-17023-monitoring-rs0-0 ++ jq -r '.mongodb[] | select(.service_name=="monitoring-2-0-17023-monitoring-rs0-0") | .cluster' /tmp/tmp.Nkr2LYGrdi/pmm_service_list.json + pmm_service_cluster=custom-cluster-name + [[ custom-cluster-name != custom-cluster-name ]] + check_custom_cluster_name monitoring-2-0-17023-monitoring-cfg-0 /tmp/tmp.Nkr2LYGrdi/pmm_service_list.json + local pod_service_name=monitoring-2-0-17023-monitoring-cfg-0 + local pmm_services_file=/tmp/tmp.Nkr2LYGrdi/pmm_service_list.json + echo 'Checking monitoring-2-0-17023-monitoring-cfg-0' Checking monitoring-2-0-17023-monitoring-cfg-0 ++ jq -r '.mongodb[] | select(.service_name=="monitoring-2-0-17023-monitoring-cfg-0") | .cluster' /tmp/tmp.Nkr2LYGrdi/pmm_service_list.json + pmm_service_cluster=custom-cluster-name + [[ custom-cluster-name != custom-cluster-name ]] + [[ -n '' ]] ++ kubectl_bin logs monitoring-rs0-0 pmm-client ++ grep -c 'cannot auto discover databases and collections' +++ mktemp ++ local LAST_OUT=/tmp/tmp.G7Yh9WSp1z +++ mktemp ++ local LAST_ERR=/tmp/tmp.kRYNMCycNA ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl logs 
monitoring-rs0-0 pmm-client ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.G7Yh9WSp1z ++ cat /tmp/tmp.kRYNMCycNA ++ rm /tmp/tmp.G7Yh9WSp1z /tmp/tmp.kRYNMCycNA ++ return 0 + [[ 0 != 0 ]] + helm uninstall monitoring release "monitoring" uninstalled + destroy monitoring-2-0-17023 + local namespace=monitoring-2-0-17023 + local ignore_logs=true + [[ 0 == 1 ]] + desc 'destroy cluster/operator and all other resources' + set +o xtrace ----------------------------------------------------------------------------------- destroy cluster/operator and all other resources ----------------------------------------------------------------------------------- + '[' true == false ']' + delete_backups + desc 'Delete psmdb-backup' + set +o xtrace ----------------------------------------------------------------------------------- Delete psmdb-backup ----------------------------------------------------------------------------------- ++ kubectl_bin get psmdb-backup --no-headers ++ wc -l +++ mktemp ++ local LAST_OUT=/tmp/tmp.9Zn0y4IuB0 +++ mktemp ++ local LAST_ERR=/tmp/tmp.2MD4b4vP7x ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb-backup --no-headers ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.9Zn0y4IuB0 ++ cat /tmp/tmp.2MD4b4vP7x No resources found in monitoring-2-0-17023 namespace. ++ rm /tmp/tmp.9Zn0y4IuB0 /tmp/tmp.2MD4b4vP7x ++ return 0 + '[' 0 '!=' 0 ']' + delete_crd + desc 'get and delete old CRDs and RBAC' + set +o xtrace ----------------------------------------------------------------------------------- get and delete old CRDs and RBAC ----------------------------------------------------------------------------------- + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2287/deploy/crd.yaml --ignore-not-found --wait=false ++ mktemp + local LAST_OUT=/tmp/tmp.BwN8amllOq ++ mktemp + local LAST_ERR=/tmp/tmp.uPHP3M0ZL4 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2287/deploy/crd.yaml --ignore-not-found --wait=false + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.BwN8amllOq customresourcedefinition.apiextensions.k8s.io "perconaservermongodbbackups.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbrestores.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbs.psmdb.percona.com" deleted + cat /tmp/tmp.uPHP3M0ZL4 + rm /tmp/tmp.BwN8amllOq /tmp/tmp.uPHP3M0ZL4 + return 0 ++ yq eval .metadata.name /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2287/deploy/crd.yaml ++ grep -v '\-\-\-' grep: warning: stray \ before - grep: warning: stray \ before - + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + kubectl get perconaservermongodbbackups.psmdb.percona.com --all-namespaces -o wide + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbbackups.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' + grep -v NAMESPACE error: the server doesn't have a resource type "perconaservermongodbbackups" + kubectl patch perconaservermongodbbackups.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbbackups" + : + kubectl_bin wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com ++ mktemp + local 
LAST_OUT=/tmp/tmp.oFJzKilSvy ++ mktemp + local LAST_ERR=/tmp/tmp.RSWPp2sZQq + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.oFJzKilSvy + cat /tmp/tmp.RSWPp2sZQq + rm /tmp/tmp.oFJzKilSvy /tmp/tmp.RSWPp2sZQq + return 0 + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + kubectl get perconaservermongodbrestores.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbrestores.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbrestores" + kubectl patch perconaservermongodbrestores.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbrestores" + : + kubectl_bin wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.CKIaWuIyZG ++ mktemp + local LAST_ERR=/tmp/tmp.kA81PL3Bl2 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.CKIaWuIyZG + cat /tmp/tmp.kA81PL3Bl2 + rm /tmp/tmp.CKIaWuIyZG /tmp/tmp.kA81PL3Bl2 + return 0 + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + kubectl get perconaservermongodbs.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbs.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbs" + kubectl patch perconaservermongodbs.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbs" + : + kubectl_bin wait --for=delete crd perconaservermongodbs.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.zWG4oxklCf ++ mktemp + local LAST_ERR=/tmp/tmp.ong4mUiRld + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbs.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.zWG4oxklCf + cat /tmp/tmp.ong4mUiRld + rm /tmp/tmp.zWG4oxklCf /tmp/tmp.ong4mUiRld + return 0 + local rbac_yaml=rbac.yaml + '[' -n psmdb-operator ']' + rbac_yaml=cw-rbac.yaml + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2287/deploy/cw-rbac.yaml --ignore-not-found ++ mktemp + local LAST_OUT=/tmp/tmp.rd5FgJFzAY ++ mktemp + local LAST_ERR=/tmp/tmp.5EyROfz4zb + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2287/deploy/cw-rbac.yaml --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.rd5FgJFzAY clusterrole.rbac.authorization.k8s.io "percona-server-mongodb-operator" deleted clusterrolebinding.rbac.authorization.k8s.io "service-account-percona-server-mongodb-operator" deleted + cat /tmp/tmp.5EyROfz4zb + rm /tmp/tmp.rd5FgJFzAY /tmp/tmp.5EyROfz4zb + return 0 + destroy_cert_manager + kubectl_bin delete -f 
https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.cnOnYLMitT ++ mktemp + local LAST_ERR=/tmp/tmp.xi79rV53kX + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.cnOnYLMitT namespace "cert-manager" deleted customresourcedefinition.apiextensions.k8s.io "challenges.acme.cert-manager.io" deleted customresourcedefinition.apiextensions.k8s.io "orders.acme.cert-manager.io" deleted customresourcedefinition.apiextensions.k8s.io "certificaterequests.cert-manager.io" deleted customresourcedefinition.apiextensions.k8s.io "certificates.cert-manager.io" deleted customresourcedefinition.apiextensions.k8s.io "clusterissuers.cert-manager.io" deleted customresourcedefinition.apiextensions.k8s.io "issuers.cert-manager.io" deleted serviceaccount "cert-manager-cainjector" deleted from cert-manager namespace serviceaccount "cert-manager" deleted from cert-manager namespace serviceaccount "cert-manager-webhook" deleted from cert-manager namespace clusterrole.rbac.authorization.k8s.io "cert-manager-cainjector" deleted clusterrole.rbac.authorization.k8s.io "cert-manager-controller-issuers" deleted clusterrole.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" deleted clusterrole.rbac.authorization.k8s.io "cert-manager-controller-certificates" deleted clusterrole.rbac.authorization.k8s.io "cert-manager-controller-orders" deleted clusterrole.rbac.authorization.k8s.io "cert-manager-controller-challenges" deleted clusterrole.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" deleted clusterrole.rbac.authorization.k8s.io "cert-manager-cluster-view" deleted clusterrole.rbac.authorization.k8s.io "cert-manager-view" deleted clusterrole.rbac.authorization.k8s.io "cert-manager-edit" deleted clusterrole.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" deleted clusterrole.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" deleted clusterrole.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" deleted clusterrolebinding.rbac.authorization.k8s.io "cert-manager-cainjector" deleted clusterrolebinding.rbac.authorization.k8s.io "cert-manager-controller-issuers" deleted clusterrolebinding.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" deleted clusterrolebinding.rbac.authorization.k8s.io "cert-manager-controller-certificates" deleted clusterrolebinding.rbac.authorization.k8s.io "cert-manager-controller-orders" deleted clusterrolebinding.rbac.authorization.k8s.io "cert-manager-controller-challenges" deleted clusterrolebinding.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" deleted clusterrolebinding.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" deleted clusterrolebinding.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" deleted clusterrolebinding.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" deleted role.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" deleted from kube-system namespace role.rbac.authorization.k8s.io "cert-manager:leaderelection" deleted from kube-system namespace rolebinding.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" deleted from kube-system namespace 
rolebinding.rbac.authorization.k8s.io "cert-manager:leaderelection" deleted from kube-system namespace deployment.apps "cert-manager-cainjector" deleted from cert-manager namespace deployment.apps "cert-manager" deleted from cert-manager namespace deployment.apps "cert-manager-webhook" deleted from cert-manager namespace mutatingwebhookconfiguration.admissionregistration.k8s.io "cert-manager-webhook" deleted validatingwebhookconfiguration.admissionregistration.k8s.io "cert-manager-webhook" deleted + cat /tmp/tmp.xi79rV53kX Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": services "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": services "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": services "cert-manager-webhook" not found + sleep 0 + for i in $(seq 0 2) + set +e + kubectl delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.cnOnYLMitT + cat /tmp/tmp.xi79rV53kX Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": namespaces "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "challenges.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "orders.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificaterequests.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificates.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "clusterissuers.cert-manager.io" not found 
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "issuers.cert-manager.io" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": serviceaccounts "cert-manager-cainjector" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": serviceaccounts "cert-manager" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": serviceaccounts "cert-manager-webhook" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cainjector" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-orders" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cluster-view" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-view" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-edit" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found
"https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting 
"https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": services "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": services "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": services "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": deployments.apps "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": deployments.apps "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": deployments.apps "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": mutatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": validatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found + sleep 4 + for i in $(seq 0 2) + set +e + kubectl delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.cnOnYLMitT + cat /tmp/tmp.xi79rV53kX Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": namespaces "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "challenges.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "orders.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io 
"certificaterequests.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificates.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "clusterissuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "issuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": serviceaccounts "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": serviceaccounts "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": serviceaccounts "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cluster-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-edit" not found Error from server 
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-cainjector" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-orders" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager:leaderelection" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager:leaderelection" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": services "cert-manager-cainjector" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": services "cert-manager" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": services "cert-manager-webhook" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": deployments.apps "cert-manager-cainjector" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": deployments.apps "cert-manager" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": deployments.apps "cert-manager-webhook" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": mutatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": validatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found
+ sleep 8
+ cat /tmp/tmp.cnOnYLMitT
+ cat /tmp/tmp.xi79rV53kX
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": namespaces "cert-manager" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "challenges.acme.cert-manager.io" not found
"orders.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificaterequests.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificates.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "clusterissuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "issuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": serviceaccounts "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": serviceaccounts "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": serviceaccounts "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cluster-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-view" not found Error 
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-edit" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-cainjector" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-orders" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager:leaderelection" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager:leaderelection" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": services "cert-manager-cainjector" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": services "cert-manager" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": services "cert-manager-webhook" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": deployments.apps "cert-manager-cainjector" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": deployments.apps "cert-manager" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": deployments.apps "cert-manager-webhook" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": mutatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": validatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found
+ rm /tmp/tmp.cnOnYLMitT /tmp/tmp.xi79rV53kX
+ return 1
+ true
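Note: the retry behaviour visible above can be reproduced with a wrapper along the following lines. This is a minimal sketch reconstructed purely from this trace (the mktemp capture files, the three attempts, and the 0/4/8-second sleeps all appear in the log); the actual kubectl_bin helper shipped with the test suite may differ in its details.

kubectl_bin() {
	local LAST_OUT LAST_ERR
	LAST_OUT=$(mktemp)                       # capture stdout of the wrapped command
	LAST_ERR=$(mktemp)                       # capture stderr separately
	local exit_status=0
	local timeout=4
	for i in $(seq 0 2); do                  # at most three attempts
		set +e                               # tolerate a non-zero exit inside the loop
		kubectl "$@" >"$LAST_OUT" 2>"$LAST_ERR"
		exit_status=$?
		set -e
		if [ "$exit_status" -eq 0 ]; then
			break                            # success: stop retrying
		fi
		cat "$LAST_OUT"
		cat "$LAST_ERR"
		sleep $((timeout * i))               # back off 0s, then 4s, then 8s
	done
	cat "$LAST_OUT"                          # always echo the final output
	cat "$LAST_ERR"
	rm "$LAST_OUT" "$LAST_ERR"
	return $exit_status                      # caller tolerates failure via `|| true` (the `+ true` above)
}

Since the first attempt already removed every cert-manager object, the second and third attempts can only report NotFound; passing --ignore-not-found to the kubectl delete invocation would let the first call exit 0 and skip the futile retries.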
+ '[' -n '' ']'
+ '[' -n psmdb-operator ']'
+ kubectl_bin delete --grace-period=0 --force=true namespace monitoring-2-0-17023
+ kubectl_bin delete --grace-period=0 --force=true namespace psmdb-operator
+ rm -rf /tmp/tmp.Nkr2LYGrdi
++ mktemp
++ mktemp
+ local LAST_OUT=/tmp/tmp.8SgaOTgO5f
+ local LAST_OUT=/tmp/tmp.LIDQJsQ844
+ desc 'test passed'
+ set +o xtrace
-----------------------------------------------------------------------------------
test passed
-----------------------------------------------------------------------------------
++ mktemp
++ mktemp
+ local LAST_ERR=/tmp/tmp.qkcws00V0a
+ local LAST_ERR=/tmp/tmp.P1s8DUUuMf
+ local exit_status=0
+ local timeout=4
+ local exit_status=0
+ local timeout=4
++ seq 0 2
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ kubectl delete --grace-period=0 --force=true namespace monitoring-2-0-17023
+ for i in $(seq 0 2)
+ set +e
+ kubectl delete --grace-period=0 --force=true namespace psmdb-operator
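The interleaved trace above (two LAST_OUT assignments, then two parallel retry loops) suggests the final teardown deletes both test namespaces concurrently. A minimal sketch of an equivalent teardown, assuming background jobs are used (the namespace names are taken from the log; the suite's real destroy logic may differ):

# Force-delete both test namespaces in parallel, then wait for both jobs.
# --grace-period=0 --force requests immediate deletion, bypassing the
# normal graceful-termination period for anything left in the namespaces.
kubectl delete --grace-period=0 --force=true namespace monitoring-2-0-17023 &
kubectl delete --grace-period=0 --force=true namespace psmdb-operator &
wait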