Log: /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2272/e2e-tests/logs/monitoring-2-0.log
Warning: version difference between client (1.35) and server (1.32) exceeds the supported minor version skew of +/-1
Warning: version difference between client (1.35) and server (1.32) exceeds the supported minor version skew of +/-1
Warning: version difference between client (1.35) and server (1.32) exceeds the supported minor version skew of +/-1
+ create_infra monitoring-2-0-24852
+ local ns=monitoring-2-0-24852
+ [[ 1 == 1 ]]
+ delete_crd
+ desc 'get and delete old CRDs and RBAC'
+ set +o xtrace
-----------------------------------------------------------------------------------
get and delete old CRDs and RBAC
-----------------------------------------------------------------------------------
+ kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2272/deploy/crd.yaml --ignore-not-found --wait=false
++ mktemp
+ local LAST_OUT=/tmp/tmp.lEUvRj5RzS
++ mktemp
+ local LAST_ERR=/tmp/tmp.43RcyX4W7z
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2272/deploy/crd.yaml --ignore-not-found --wait=false
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.lEUvRj5RzS
customresourcedefinition.apiextensions.k8s.io "perconaservermongodbbackups.psmdb.percona.com" deleted
customresourcedefinition.apiextensions.k8s.io "perconaservermongodbrestores.psmdb.percona.com" deleted
customresourcedefinition.apiextensions.k8s.io "perconaservermongodbs.psmdb.percona.com" deleted
+ cat /tmp/tmp.43RcyX4W7z
+ rm /tmp/tmp.lEUvRj5RzS /tmp/tmp.43RcyX4W7z
+ return 0
++ yq eval .metadata.name /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2272/deploy/crd.yaml
++ grep -v '\-\-\-'
grep: warning: stray \ before -
grep: warning: stray \ before -
+ for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-')
+ kubectl get perconaservermongodbbackups.psmdb.percona.com --all-namespaces -o wide
+ grep -v NAMESPACE
+ xargs -L 1 sh -xc 'kubectl patch perconaservermongodbbackups.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"'
error: the server doesn't have a resource type "perconaservermongodbbackups"
+ kubectl patch perconaservermongodbbackups.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}'
error: the server doesn't have a resource type "perconaservermongodbbackups"
+ :
+ kubectl_bin wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com
++ mktemp
+ local LAST_OUT=/tmp/tmp.10O83UfOdx
++ mktemp
+ local LAST_ERR=/tmp/tmp.oonNjqCIdt
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ kubectl wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.10O83UfOdx
+ cat /tmp/tmp.oonNjqCIdt
+ rm /tmp/tmp.10O83UfOdx /tmp/tmp.oonNjqCIdt
+ return 0
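Nearly every step in this trace goes through the same kubectl_bin retry wrapper, so its expansion (mktemp, seq 0 2, the '[' 0 '!=' 0 ']' test) repeats constantly below. A minimal reconstruction from those expansions, to make the rest of the log easier to follow; the exact function body is an assumption, but the 0s/4s/8s backoff matches the failing crd/null lookup further down:

# Reconstructed retry wrapper (approximate): capture stdout/stderr to temp
# files, retry a failing kubectl up to three times with growing sleeps,
# then echo both streams and clean up.
kubectl_bin() {
    local LAST_OUT LAST_ERR exit_status=0 timeout=4 i
    LAST_OUT=$(mktemp)
    LAST_ERR=$(mktemp)
    for i in $(seq 0 2); do
        set +e
        kubectl "$@" >"$LAST_OUT" 2>"$LAST_ERR"
        exit_status=$?
        set -e
        if [ "$exit_status" != 0 ]; then
            cat "$LAST_OUT"
            cat "$LAST_ERR"
            sleep $((timeout * i))   # 0s, then 4s, then 8s between attempts
        else
            break
        fi
    done
    cat "$LAST_OUT"
    cat "$LAST_ERR"
    rm "$LAST_OUT" "$LAST_ERR"
    return $exit_status
}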
+ for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-')
+ kubectl get perconaservermongodbrestores.psmdb.percona.com --all-namespaces -o wide
+ grep -v NAMESPACE
+ xargs -L 1 sh -xc 'kubectl patch perconaservermongodbrestores.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"'
error: the server doesn't have a resource type "perconaservermongodbrestores"
+ kubectl patch perconaservermongodbrestores.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}'
error: the server doesn't have a resource type "perconaservermongodbrestores"
+ :
+ kubectl_bin wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com
++ mktemp
+ local LAST_OUT=/tmp/tmp.0wEcBUn9DK
++ mktemp
+ local LAST_ERR=/tmp/tmp.o5x7uHpr1Z
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ kubectl wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.0wEcBUn9DK
+ cat /tmp/tmp.o5x7uHpr1Z
+ rm /tmp/tmp.0wEcBUn9DK /tmp/tmp.o5x7uHpr1Z
+ return 0
+ for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-')
+ kubectl get perconaservermongodbs.psmdb.percona.com --all-namespaces -o wide
+ grep -v NAMESPACE
+ xargs -L 1 sh -xc 'kubectl patch perconaservermongodbs.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"'
error: the server doesn't have a resource type "perconaservermongodbs"
+ kubectl patch perconaservermongodbs.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}'
error: the server doesn't have a resource type "perconaservermongodbs"
+ :
+ kubectl_bin wait --for=delete crd perconaservermongodbs.psmdb.percona.com
++ mktemp
+ local LAST_OUT=/tmp/tmp.VxgVTqyvcE
++ mktemp
+ local LAST_ERR=/tmp/tmp.2HdxNv8xa0
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ kubectl wait --for=delete crd perconaservermongodbs.psmdb.percona.com
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.VxgVTqyvcE
+ cat /tmp/tmp.2HdxNv8xa0
+ rm /tmp/tmp.VxgVTqyvcE /tmp/tmp.2HdxNv8xa0
+ return 0
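Each of the three loop iterations above strips finalizers from any leftover custom resources before waiting for the CRD itself to disappear. The odd "-n sh" in the patch command is apparently xargs invoking the script once even on empty input, so $0 falls back to "sh". A standalone sketch of the pattern (the CRD name is just one of the three from this trace):

# For one CRD: clear finalizers on all remaining CRs (namespace and name are
# fed in as $0 and $1 by xargs), then wait until the CRD is fully deleted.
crd=perconaservermongodbbackups.psmdb.percona.com
kubectl get "$crd" --all-namespaces -o wide \
    | grep -v NAMESPACE \
    | xargs -L 1 sh -xc "kubectl patch $crd -n \$0 \$1 --type=merge -p '{\"metadata\":{\"finalizers\":[]}}'" \
    || :   # tolerate "doesn't have a resource type" when the CRD is already gone
kubectl wait --for=delete crd "$crd"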
+ local rbac_yaml=rbac.yaml
+ '[' -n psmdb-operator ']'
+ rbac_yaml=cw-rbac.yaml
+ kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2272/deploy/cw-rbac.yaml --ignore-not-found
++ mktemp
+ local LAST_OUT=/tmp/tmp.tFdcPcvaGP
++ mktemp
+ local LAST_ERR=/tmp/tmp.OyeFkL1mpU
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2272/deploy/cw-rbac.yaml --ignore-not-found
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.tFdcPcvaGP
clusterrole.rbac.authorization.k8s.io "percona-server-mongodb-operator" deleted
clusterrolebinding.rbac.authorization.k8s.io "service-account-percona-server-mongodb-operator" deleted
+ cat /tmp/tmp.OyeFkL1mpU
+ rm /tmp/tmp.tFdcPcvaGP /tmp/tmp.OyeFkL1mpU
+ return 0
+ check_crd_for_deletion PR-2272-ae4e3cbc
+ local git_tag=PR-2272-ae4e3cbc
++ curl -s https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/PR-2272-ae4e3cbc/deploy/crd.yaml
++ yq eval .metadata.name
++ /usr/sbin/sed s/---//g
++ /usr/sbin/sed ':a;N;$!ba;s/\n/ /g'
+ for crd_name in $(curl -s https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/${git_tag}/deploy/crd.yaml | yq eval '.metadata.name' | $sed 's/---//g' | $sed ':a;N;$!ba;s/\n/ /g')
++ kubectl_bin get crd/null -o 'jsonpath={.status.conditions[-1].type}'
+++ mktemp
++ local LAST_OUT=/tmp/tmp.vwkfgtoghV
+++ mktemp
++ local LAST_ERR=/tmp/tmp.bSOOJvA6t4
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in $(seq 0 2)
++ set +e
++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}'
++ exit_status=1
++ set -e
++ '[' 1 '!=' 0 -a -n 1 ']'
++ cat /tmp/tmp.vwkfgtoghV
++ cat /tmp/tmp.bSOOJvA6t4
Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found
++ sleep 0
++ for i in $(seq 0 2)
++ set +e
++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}'
++ exit_status=1
++ set -e
++ '[' 1 '!=' 0 -a -n 1 ']'
++ cat /tmp/tmp.vwkfgtoghV
++ cat /tmp/tmp.bSOOJvA6t4
Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found
++ sleep 4
++ for i in $(seq 0 2)
++ set +e
++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}'
++ exit_status=1
++ set -e
++ '[' 1 '!=' 0 -a -n 1 ']'
++ cat /tmp/tmp.vwkfgtoghV
++ cat /tmp/tmp.bSOOJvA6t4
Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found
++ sleep 8
++ cat /tmp/tmp.vwkfgtoghV
++ cat /tmp/tmp.bSOOJvA6t4
Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found
++ rm /tmp/tmp.vwkfgtoghV /tmp/tmp.bSOOJvA6t4
++ return 1
+ [[ '' == \T\e\r\m\i\n\a\t\i\n\g ]]
+ '[' -n psmdb-operator ']'
+ create_namespace psmdb-operator
+ local namespace=psmdb-operator
+ local skip_clean_namespace=
+ [[ 1 == 1 ]]
+ [[ -z '' ]]
+ destroy_chaos_mesh
++ helm list --all-namespaces --filter chaos-mesh
++ tail -n1
++ awk '-F ' '{print $2}'
++ sed s/NAMESPACE//
+ local chaos_mesh_ns=
+ desc 'destroy chaos-mesh'
+ set +o xtrace
-----------------------------------------------------------------------------------
destroy chaos-mesh
-----------------------------------------------------------------------------------
+ '[' -n '' ']'
++ kubectl get MutatingWebhookConfiguration
++ grep chaos-mesh
++ awk '{print $1}'
+ timeout 30 kubectl delete MutatingWebhookConfiguration
error: resource(s) were provided, but no name was specified
+ :
++ kubectl get ValidatingWebhookConfiguration
++ grep chaos-mesh
++ awk '{print $1}'
+ timeout 30 kubectl delete ValidatingWebhookConfiguration
error: resource(s) were provided, but no name was specified
+ :
++ kubectl get ValidatingWebhookConfiguration
++ grep validate-auth
++ awk '{print $1}'
+ timeout 30 kubectl delete ValidatingWebhookConfiguration
error: resource(s) were provided, but no name was specified
+ :
++ kubectl api-resources
++ awk '{print $1}'
++ grep chaos-mesh
++ grep chaos-mesh.org
++ awk '{print $1}'
++ kubectl get crd
+ timeout 30 kubectl delete crd
error: resource(s) were provided, but no name was specified
+ :
++ grep chaos-mesh
++ kubectl get clusterrolebinding
++ awk '{print $1}'
+ timeout 30 kubectl delete clusterrolebinding
error: resource(s) were provided, but no name was specified
+ :
++ kubectl get clusterrole
++ grep chaos-mesh
++ awk '{print $1}'
+ timeout 30 kubectl delete clusterrole
error: resource(s) were provided, but no name was specified
+ :
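destroy_chaos_mesh above is a series of best-effort deletions; the repeated "no name was specified" errors just mean the grep matched nothing on this cluster. Each step has the same shape, sketched here with the first resource kind from the trace:

# Delete whatever matches, bound the call with a timeout, and never let an
# empty match list or a missing resource fail the test run (trailing :).
timeout 30 kubectl delete MutatingWebhookConfiguration \
    $(kubectl get MutatingWebhookConfiguration | grep chaos-mesh | awk '{print $1}') || :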
+ desc 'cleaned up all old namespaces'
+ set +o xtrace
-----------------------------------------------------------------------------------
cleaned up all old namespaces
-----------------------------------------------------------------------------------
+ kubectl_bin get ns
+ grep -E -v '^kube-|^default|Terminating|psmdb-operator|openshift|^gke-|^gmp-|^NAME'
+ awk '{print$1}'
+ '[' -n '' ']'
+ xargs kubectl delete ns
+ desc 'cleaned up old namespaces psmdb-operator'
+ set +o xtrace
-----------------------------------------------------------------------------------
cleaned up old namespaces psmdb-operator
-----------------------------------------------------------------------------------
+ kubectl_bin delete namespace psmdb-operator --ignore-not-found
++ mktemp
++ mktemp
+ local LAST_OUT=/tmp/tmp.By8WfytUwA
++ mktemp
+ local LAST_ERR=/tmp/tmp.qOl2v4oXKC
+ local exit_status=0
+ local timeout=4
+ local LAST_OUT=/tmp/tmp.CPOt7JrPwi
++ seq 0 2
++ mktemp
+ for i in $(seq 0 2)
+ set +e
+ kubectl get ns
+ local LAST_ERR=/tmp/tmp.MzL1KWmIbd
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ kubectl delete namespace psmdb-operator --ignore-not-found
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.By8WfytUwA
+ cat /tmp/tmp.qOl2v4oXKC
+ rm /tmp/tmp.By8WfytUwA /tmp/tmp.qOl2v4oXKC
+ return 0
namespace "cert-manager" deleted
namespace "monitoring-2-0-17697" deleted
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.CPOt7JrPwi
namespace "psmdb-operator" deleted
+ cat /tmp/tmp.MzL1KWmIbd
+ rm /tmp/tmp.CPOt7JrPwi /tmp/tmp.MzL1KWmIbd
+ return 0
+ kubectl_bin wait --for=delete namespace psmdb-operator
++ mktemp
+ local LAST_OUT=/tmp/tmp.oFAJ1FfoLl
++ mktemp
+ local LAST_ERR=/tmp/tmp.JC1XJeZ2XC
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ kubectl wait --for=delete namespace psmdb-operator
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.oFAJ1FfoLl
+ cat /tmp/tmp.JC1XJeZ2XC
+ rm /tmp/tmp.oFAJ1FfoLl /tmp/tmp.JC1XJeZ2XC
+ return 0
+ desc 'create namespace psmdb-operator'
+ set +o xtrace
-----------------------------------------------------------------------------------
create namespace psmdb-operator
-----------------------------------------------------------------------------------
+ kubectl_bin create namespace psmdb-operator
++ mktemp
+ local LAST_OUT=/tmp/tmp.d6XBPXqhGv
++ mktemp
+ local LAST_ERR=/tmp/tmp.u3jgXgKNgG
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ kubectl create namespace psmdb-operator
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.d6XBPXqhGv
namespace/psmdb-operator created
+ cat /tmp/tmp.u3jgXgKNgG
+ rm /tmp/tmp.d6XBPXqhGv /tmp/tmp.u3jgXgKNgG
+ return 0
+ set_kube_ctx psmdb-operator
+ local namespace=psmdb-operator
++ kubectl_bin config current-context
+++ mktemp
++ local LAST_OUT=/tmp/tmp.CYvuMejAYO
+++ mktemp
++ local LAST_ERR=/tmp/tmp.lG4sL0UMAS
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in $(seq 0 2)
++ set +e
++ kubectl config current-context
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 -a -n 1 ']'
++ break
++ cat /tmp/tmp.CYvuMejAYO
++ cat /tmp/tmp.lG4sL0UMAS
++ rm /tmp/tmp.CYvuMejAYO /tmp/tmp.lG4sL0UMAS
++ return 0
+ kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-2272-ae4e3cbc-4-cluster13 --namespace=psmdb-operator
++ mktemp
+ local LAST_OUT=/tmp/tmp.fPXNizuUO3
++ mktemp
+ local LAST_ERR=/tmp/tmp.15mSftRT2D
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-2272-ae4e3cbc-4-cluster13 --namespace=psmdb-operator
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.fPXNizuUO3
Context "gke_cloud-dev-112233_us-central1-a_jen-psmdb-2272-ae4e3cbc-4-cluster13" modified.
+ cat /tmp/tmp.15mSftRT2D
+ rm /tmp/tmp.fPXNizuUO3 /tmp/tmp.15mSftRT2D
+ return 0
+ deploy_operator
+ desc 'start PSMDB operator: docker.io/perconalab/percona-server-mongodb-operator:PR-2272-ae4e3cbc'
+ set +o xtrace
-----------------------------------------------------------------------------------
start PSMDB operator: docker.io/perconalab/percona-server-mongodb-operator:PR-2272-ae4e3cbc
-----------------------------------------------------------------------------------
+ local cr_file
+ '[' -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2272/e2e-tests/monitoring-2-0/conf/crd.yaml ']'
+ cr_file=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2272/deploy/crd.yaml
+ kubectl_bin apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2272/deploy/crd.yaml
++ mktemp
+ local LAST_OUT=/tmp/tmp.XTSTGR3dqG
++ mktemp
+ local LAST_ERR=/tmp/tmp.36WLEZ7Lxm
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ kubectl apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2272/deploy/crd.yaml
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.XTSTGR3dqG
customresourcedefinition.apiextensions.k8s.io/perconaservermongodbbackups.psmdb.percona.com serverside-applied
customresourcedefinition.apiextensions.k8s.io/perconaservermongodbrestores.psmdb.percona.com serverside-applied
customresourcedefinition.apiextensions.k8s.io/perconaservermongodbs.psmdb.percona.com serverside-applied
+ cat /tmp/tmp.36WLEZ7Lxm
+ rm /tmp/tmp.XTSTGR3dqG /tmp/tmp.36WLEZ7Lxm
+ return 0
+ '[' -n psmdb-operator ']'
+ apply_rbac cw-rbac
+ local operator_namespace=psmdb-operator
+ local rbac=cw-rbac
+ sed -e 's^namespace: .*^namespace: psmdb-operator^'
+ cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2272/deploy/cw-rbac.yaml
+ kubectl_bin apply -n psmdb-operator -f -
++ mktemp
+ local LAST_OUT=/tmp/tmp.rgm0Fbbyot
++ mktemp
+ local LAST_ERR=/tmp/tmp.k7UqgJYBy7
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ kubectl apply -n psmdb-operator -f -
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.rgm0Fbbyot
clusterrole.rbac.authorization.k8s.io/percona-server-mongodb-operator created
serviceaccount/percona-server-mongodb-operator created
clusterrolebinding.rbac.authorization.k8s.io/service-account-percona-server-mongodb-operator created
+ cat /tmp/tmp.k7UqgJYBy7
+ rm /tmp/tmp.rgm0Fbbyot /tmp/tmp.k7UqgJYBy7
+ return 0
+ yq eval '(.spec.template.spec.containers[].image = "docker.io/perconalab/percona-server-mongodb-operator:PR-2272-ae4e3cbc") | ((.. | select(.[] == "DISABLE_TELEMETRY")) |= .value="true") | ((.. | select(.[] == "LOG_LEVEL")) |= .value="DEBUG")' /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2272/deploy/cw-operator.yaml
+ kubectl_bin apply -n psmdb-operator -f -
++ mktemp
+ local LAST_OUT=/tmp/tmp.x0bLsAP181
++ mktemp
+ local LAST_ERR=/tmp/tmp.hIlSDymRsV
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ kubectl apply -n psmdb-operator -f -
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.x0bLsAP181
deployment.apps/percona-server-mongodb-operator created
+ cat /tmp/tmp.hIlSDymRsV
+ rm /tmp/tmp.x0bLsAP181 /tmp/tmp.hIlSDymRsV
+ return 0
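The yq expression just above rewrites the stock deploy/cw-operator.yaml in flight: it pins the operator image to the PR build and, by matching env entries whose "name" field equals the given string, forces DISABLE_TELEMETRY=true and LOG_LEVEL=DEBUG. Untangled from the trace and piped straight to kubectl:

# Rewrite the operator Deployment before applying it: pin the image and
# flip two env vars by selecting the env entries via their "name" value.
yq eval '
    (.spec.template.spec.containers[].image = "docker.io/perconalab/percona-server-mongodb-operator:PR-2272-ae4e3cbc") |
    ((.. | select(.[] == "DISABLE_TELEMETRY")) |= .value="true") |
    ((.. | select(.[] == "LOG_LEVEL")) |= .value="DEBUG")
' deploy/cw-operator.yaml | kubectl apply -n psmdb-operator -f -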
+ sleep 20
++ get_operator_pod
++ kubectl_bin get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator
+++ mktemp
++ local LAST_OUT=/tmp/tmp.rliJlnuDCx
+++ mktemp
++ local LAST_ERR=/tmp/tmp.4oYZgxwUw9
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in $(seq 0 2)
++ set +e
++ kubectl get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 -a -n 1 ']'
++ break
++ cat /tmp/tmp.rliJlnuDCx
++ cat /tmp/tmp.4oYZgxwUw9
++ rm /tmp/tmp.rliJlnuDCx /tmp/tmp.4oYZgxwUw9
++ return 0
+ wait_operator_pod percona-server-mongodb-operator-6455bb5fb4-q8cfl
+ local pod=percona-server-mongodb-operator-6455bb5fb4-q8cfl
+ set +o xtrace
waiting for pod/percona-server-mongodb-operator-6455bb5fb4-q8cfl to be ready.OK
+ echo 'Print operator info from log'
Print operator info from log
+ grep 'Manager starting up'
++ get_operator_pod
++ kubectl_bin get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator
+++ mktemp
++ local LAST_OUT=/tmp/tmp.WDP7HaWtVX
+++ mktemp
++ local LAST_ERR=/tmp/tmp.V5etUfUgXA
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in $(seq 0 2)
++ set +e
++ kubectl get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 -a -n 1 ']'
++ break
++ cat /tmp/tmp.WDP7HaWtVX
++ cat /tmp/tmp.V5etUfUgXA
++ rm /tmp/tmp.WDP7HaWtVX /tmp/tmp.V5etUfUgXA
++ return 0
+ kubectl_bin logs -n psmdb-operator percona-server-mongodb-operator-6455bb5fb4-q8cfl
++ mktemp
+ local LAST_OUT=/tmp/tmp.bGwFfzqGQl
++ mktemp
+ local LAST_ERR=/tmp/tmp.HrMxUjUMIY
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ kubectl logs -n psmdb-operator percona-server-mongodb-operator-6455bb5fb4-q8cfl
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.bGwFfzqGQl
+ cat /tmp/tmp.HrMxUjUMIY
+ rm /tmp/tmp.bGwFfzqGQl /tmp/tmp.HrMxUjUMIY
+ return 0
2026-03-26T09:31:11.510Z INFO setup Manager starting up {"gitCommit": "ae4e3cbc053c422311418c8c6083b24139fc7e69", "gitBranch": "PR-2272-ae4e3cbc", "buildTime": "", "goVersion": "go1.25.8", "os": "linux", "arch": "amd64"}
+ create_namespace monitoring-2-0-24852
+ local namespace=monitoring-2-0-24852
+ local skip_clean_namespace=
+ [[ 1 == 1 ]]
+ [[ -z '' ]]
+ destroy_chaos_mesh
++ helm list --all-namespaces --filter chaos-mesh
++ awk '-F ' '{print $2}'
++ tail -n1
++ sed s/NAMESPACE//
+ local chaos_mesh_ns=
+ desc 'destroy chaos-mesh'
+ set +o xtrace
-----------------------------------------------------------------------------------
destroy chaos-mesh
-----------------------------------------------------------------------------------
+ '[' -n '' ']'
++ kubectl get MutatingWebhookConfiguration
++ grep chaos-mesh
++ awk '{print $1}'
+ timeout 30 kubectl delete MutatingWebhookConfiguration
error: resource(s) were provided, but no name was specified
+ :
++ kubectl get ValidatingWebhookConfiguration
++ grep chaos-mesh
++ awk '{print $1}'
+ timeout 30 kubectl delete ValidatingWebhookConfiguration
error: resource(s) were provided, but no name was specified
+ :
++ kubectl get ValidatingWebhookConfiguration
++ grep validate-auth
++ awk '{print $1}'
+ timeout 30 kubectl delete ValidatingWebhookConfiguration
error: resource(s) were provided, but no name was specified
+ :
++ kubectl api-resources
++ grep chaos-mesh
++ awk '{print $1}'
++ kubectl get crd
++ grep chaos-mesh.org
++ awk '{print $1}'
+ timeout 30 kubectl delete crd
error: resource(s) were provided, but no name was specified
+ :
++ kubectl get clusterrolebinding
++ grep chaos-mesh
++ awk '{print $1}'
+ timeout 30 kubectl delete clusterrolebinding
error: resource(s) were provided, but no name was specified
+ :
++ kubectl get clusterrole
++ grep chaos-mesh
++ awk '{print $1}'
+ timeout 30 kubectl delete clusterrole
error: resource(s) were provided, but no name was specified
+ :
+ desc 'cleaned up all old namespaces'
+ set +o xtrace
-----------------------------------------------------------------------------------
cleaned up all old namespaces
-----------------------------------------------------------------------------------
+ kubectl_bin get ns
+ grep -E -v '^kube-|^default|Terminating|psmdb-operator|openshift|^gke-|^gmp-|^NAME'
++ mktemp
+ awk '{print$1}'
+ local LAST_OUT=/tmp/tmp.nrpK4DSxxt
++ mktemp
+ local LAST_ERR=/tmp/tmp.gyDVHORqQp
+ '[' -n '' ']'
+ local exit_status=0
+ local timeout=4
+ xargs kubectl delete ns
+ desc 'cleaned up old namespaces monitoring-2-0-24852'
+ set +o xtrace
-----------------------------------------------------------------------------------
cleaned up old namespaces monitoring-2-0-24852
-----------------------------------------------------------------------------------
+ kubectl_bin delete namespace monitoring-2-0-24852 --ignore-not-found
++ seq 0 2
++ mktemp
+ for i in $(seq 0 2)
+ set +e
+ kubectl get ns
+ local LAST_OUT=/tmp/tmp.0WU6EpYjPw
++ mktemp
+ local LAST_ERR=/tmp/tmp.hHDa4LXv0q
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ kubectl delete namespace monitoring-2-0-24852 --ignore-not-found
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.nrpK4DSxxt
+ cat /tmp/tmp.gyDVHORqQp
+ rm /tmp/tmp.nrpK4DSxxt /tmp/tmp.gyDVHORqQp
+ return 0
error: resource(s) were provided, but no name was specified
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.0WU6EpYjPw
+ cat /tmp/tmp.hHDa4LXv0q
+ rm /tmp/tmp.0WU6EpYjPw /tmp/tmp.hHDa4LXv0q
+ return 0
+ kubectl_bin wait --for=delete namespace monitoring-2-0-24852
++ mktemp
+ local LAST_OUT=/tmp/tmp.TfR9y2Zs3v
++ mktemp
+ local LAST_ERR=/tmp/tmp.VvljYrHg5U
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ kubectl wait --for=delete namespace monitoring-2-0-24852
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.TfR9y2Zs3v
+ cat /tmp/tmp.VvljYrHg5U
+ rm /tmp/tmp.TfR9y2Zs3v /tmp/tmp.VvljYrHg5U
+ return 0
+ desc 'create namespace monitoring-2-0-24852'
+ set +o xtrace
-----------------------------------------------------------------------------------
create namespace monitoring-2-0-24852
-----------------------------------------------------------------------------------
+ kubectl_bin create namespace monitoring-2-0-24852
++ mktemp
+ local LAST_OUT=/tmp/tmp.KyiSdDjAOe
++ mktemp
+ local LAST_ERR=/tmp/tmp.kjgy4OagNu
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ kubectl create namespace monitoring-2-0-24852
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.KyiSdDjAOe
namespace/monitoring-2-0-24852 created
+ cat /tmp/tmp.kjgy4OagNu
+ rm /tmp/tmp.KyiSdDjAOe /tmp/tmp.kjgy4OagNu
+ return 0
+ set_kube_ctx monitoring-2-0-24852
+ local namespace=monitoring-2-0-24852
++ kubectl_bin config current-context
+++ mktemp
++ local LAST_OUT=/tmp/tmp.6C7rn3Buv5
+++ mktemp
++ local LAST_ERR=/tmp/tmp.AjTuLhfH2g
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in $(seq 0 2)
++ set +e
++ kubectl config current-context
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 -a -n 1 ']'
++ break
++ cat /tmp/tmp.6C7rn3Buv5
++ cat /tmp/tmp.AjTuLhfH2g
++ rm /tmp/tmp.6C7rn3Buv5 /tmp/tmp.AjTuLhfH2g
++ return 0
+ kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-2272-ae4e3cbc-4-cluster13 --namespace=monitoring-2-0-24852
++ mktemp
+ local LAST_OUT=/tmp/tmp.IPMDQ3WDap
++ mktemp
+ local LAST_ERR=/tmp/tmp.trXUVtK1Ye
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-2272-ae4e3cbc-4-cluster13 --namespace=monitoring-2-0-24852
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.IPMDQ3WDap
Context "gke_cloud-dev-112233_us-central1-a_jen-psmdb-2272-ae4e3cbc-4-cluster13" modified.
+ cat /tmp/tmp.trXUVtK1Ye
+ rm /tmp/tmp.IPMDQ3WDap /tmp/tmp.trXUVtK1Ye
+ return 0
+ deploy_cert_manager
+ desc 'deploy cert manager'
+ set +o xtrace
-----------------------------------------------------------------------------------
deploy cert manager
-----------------------------------------------------------------------------------
+ kubectl_bin create namespace cert-manager
++ mktemp
+ local LAST_OUT=/tmp/tmp.vYdHXew8Em
++ mktemp
+ local LAST_ERR=/tmp/tmp.6jIECqmiQY
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ kubectl create namespace cert-manager
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.vYdHXew8Em
namespace/cert-manager created
+ cat /tmp/tmp.6jIECqmiQY
+ rm /tmp/tmp.vYdHXew8Em /tmp/tmp.6jIECqmiQY
+ return 0
+ kubectl_bin label namespace cert-manager certmanager.k8s.io/disable-validation=true
++ mktemp
+ local LAST_OUT=/tmp/tmp.R8A3D2rY66
++ mktemp
+ local LAST_ERR=/tmp/tmp.x56nllKZcM
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ kubectl label namespace cert-manager certmanager.k8s.io/disable-validation=true
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.R8A3D2rY66
namespace/cert-manager labeled
+ cat /tmp/tmp.x56nllKZcM
+ rm /tmp/tmp.R8A3D2rY66 /tmp/tmp.x56nllKZcM
+ return 0
+ kubectl_bin apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml --validate=false
++ mktemp
+ local LAST_OUT=/tmp/tmp.EIHbpLbwL4
++ mktemp
+ local LAST_ERR=/tmp/tmp.BePTnTK51L
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ kubectl apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml --validate=false
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.EIHbpLbwL4
namespace/cert-manager configured
customresourcedefinition.apiextensions.k8s.io/challenges.acme.cert-manager.io unchanged
customresourcedefinition.apiextensions.k8s.io/orders.acme.cert-manager.io unchanged
customresourcedefinition.apiextensions.k8s.io/certificaterequests.cert-manager.io unchanged
customresourcedefinition.apiextensions.k8s.io/certificates.cert-manager.io unchanged
customresourcedefinition.apiextensions.k8s.io/clusterissuers.cert-manager.io unchanged
customresourcedefinition.apiextensions.k8s.io/issuers.cert-manager.io unchanged
serviceaccount/cert-manager-cainjector created
serviceaccount/cert-manager created
serviceaccount/cert-manager-webhook created
clusterrole.rbac.authorization.k8s.io/cert-manager-cainjector unchanged
clusterrole.rbac.authorization.k8s.io/cert-manager-controller-issuers unchanged
clusterrole.rbac.authorization.k8s.io/cert-manager-controller-clusterissuers unchanged
clusterrole.rbac.authorization.k8s.io/cert-manager-controller-certificates unchanged
clusterrole.rbac.authorization.k8s.io/cert-manager-controller-orders unchanged
clusterrole.rbac.authorization.k8s.io/cert-manager-controller-challenges unchanged
clusterrole.rbac.authorization.k8s.io/cert-manager-controller-ingress-shim unchanged
clusterrole.rbac.authorization.k8s.io/cert-manager-cluster-view unchanged
clusterrole.rbac.authorization.k8s.io/cert-manager-view unchanged
clusterrole.rbac.authorization.k8s.io/cert-manager-edit unchanged
clusterrole.rbac.authorization.k8s.io/cert-manager-controller-approve:cert-manager-io unchanged
clusterrole.rbac.authorization.k8s.io/cert-manager-controller-certificatesigningrequests unchanged
clusterrole.rbac.authorization.k8s.io/cert-manager-webhook:subjectaccessreviews unchanged
clusterrolebinding.rbac.authorization.k8s.io/cert-manager-cainjector unchanged
clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-issuers unchanged
clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-clusterissuers unchanged
clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-certificates unchanged
clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-orders unchanged
clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-challenges unchanged
clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-ingress-shim unchanged
clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-approve:cert-manager-io unchanged
clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-certificatesigningrequests unchanged
clusterrolebinding.rbac.authorization.k8s.io/cert-manager-webhook:subjectaccessreviews unchanged
role.rbac.authorization.k8s.io/cert-manager-cainjector:leaderelection unchanged
role.rbac.authorization.k8s.io/cert-manager:leaderelection unchanged
role.rbac.authorization.k8s.io/cert-manager-tokenrequest created
role.rbac.authorization.k8s.io/cert-manager-webhook:dynamic-serving created
rolebinding.rbac.authorization.k8s.io/cert-manager-cainjector:leaderelection unchanged
rolebinding.rbac.authorization.k8s.io/cert-manager:leaderelection unchanged
rolebinding.rbac.authorization.k8s.io/cert-manager-tokenrequest created
rolebinding.rbac.authorization.k8s.io/cert-manager-webhook:dynamic-serving created
service/cert-manager-cainjector created
service/cert-manager created
service/cert-manager-webhook created
deployment.apps/cert-manager-cainjector created
deployment.apps/cert-manager created
deployment.apps/cert-manager-webhook created
mutatingwebhookconfiguration.admissionregistration.k8s.io/cert-manager-webhook configured
validatingwebhookconfiguration.admissionregistration.k8s.io/cert-manager-webhook configured
+ cat /tmp/tmp.BePTnTK51L
Warning: resource namespaces/cert-manager is missing the kubectl.kubernetes.io/last-applied-configuration annotation which is required by kubectl apply. kubectl apply should only be used on resources created declaratively by either kubectl create --save-config or kubectl apply. The missing annotation will be patched automatically.
+ rm /tmp/tmp.EIHbpLbwL4 /tmp/tmp.BePTnTK51L
+ return 0
+ kubectl_bin -n cert-manager wait pod -l app.kubernetes.io/instance=cert-manager --for=condition=ready
++ mktemp
+ local LAST_OUT=/tmp/tmp.FmsWmsdqO9
++ mktemp
+ local LAST_ERR=/tmp/tmp.V2lzJvoC7U
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ kubectl -n cert-manager wait pod -l app.kubernetes.io/instance=cert-manager --for=condition=ready
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.FmsWmsdqO9
pod/cert-manager-559d798845-hfrmj condition met
pod/cert-manager-cainjector-64958d9c7c-d22m2 condition met
pod/cert-manager-webhook-7fb6f99b56-cs6tg condition met
+ cat /tmp/tmp.V2lzJvoC7U
+ rm /tmp/tmp.FmsWmsdqO9 /tmp/tmp.V2lzJvoC7U
+ return 0
+ sleep 120
+ desc 'install PMM Server'
+ set +o xtrace
-----------------------------------------------------------------------------------
install PMM Server
-----------------------------------------------------------------------------------
+ deploy_pmm_server
+ helm uninstall monitoring
Error: uninstall: Release not loaded: monitoring: release: not found
+ :
+ helm repo remove stable
"stable" has been removed from your repositories
+ helm repo add stable https://charts.helm.sh/stable
"stable" has been added to your repositories
+ [[ -n '' ]]
+ retry 10 60 helm install monitoring --set imageTag=dev-latest --set imageRepo=docker.io/perconalab/pmm-server https://percona-charts.storage.googleapis.com/pmm-server-9.9.9.tgz
+ local max=10
+ local delay=60
+ shift 2
+ local n=1
+ helm install monitoring --set imageTag=dev-latest --set imageRepo=docker.io/perconalab/pmm-server https://percona-charts.storage.googleapis.com/pmm-server-9.9.9.tgz
NAME: monitoring
LAST DEPLOYED: Thu Mar 26 09:34:14 2026
NAMESPACE: monitoring-2-0-24852
STATUS: deployed
REVISION: 1
TEST SUITE: None
NOTES:
PMM server can be accessed via HTTPS (port 443) on the following DNS name from within your cluster:
endpoint: https://monitoring-service.monitoring-2-0-24852.svc.cluster.local:443
login: admin
password: admin
+ sleep 40
+ kubectl_bin exec monitoring-0 -- bash -c 'ls -l /proc/*/exe 2>/dev/null| grep postgres >/dev/null'
++ mktemp
+ local LAST_OUT=/tmp/tmp.jjuDofDnxD
++ mktemp
+ local LAST_ERR=/tmp/tmp.c3jzkMep7p
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ kubectl exec monitoring-0 -- bash -c 'ls -l /proc/*/exe 2>/dev/null| grep postgres >/dev/null'
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.jjuDofDnxD
+ cat /tmp/tmp.c3jzkMep7p
+ rm /tmp/tmp.jjuDofDnxD /tmp/tmp.c3jzkMep7p
+ return 0
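The retry helper expanded above (max=10, delay=60) guards the helm install against transient failures; only its locals are visible in the trace because the first attempt succeeded, so the loop below is an assumption about the rest of the function:

# Run "$@" until it succeeds, at most $max times, sleeping $delay between
# attempts; only attempt 1 appears in this log.
retry() {
    local max=$1
    local delay=$2
    shift 2
    local n=1
    until "$@"; do
        if [ "$n" -ge "$max" ]; then
            echo "retry: giving up after $n attempts" >&2
            return 1
        fi
        sleep "$delay"
        n=$((n + 1))
    done
}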
+ cluster=monitoring
+ desc 'create secrets and start client'
+ set +o xtrace
-----------------------------------------------------------------------------------
create secrets and start client
-----------------------------------------------------------------------------------
+ kubectl_bin apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2272/e2e-tests/conf/secrets.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2272/e2e-tests/monitoring-2-0/conf/secrets.yml
++ mktemp
+ local LAST_OUT=/tmp/tmp.PfSiareRhl
++ mktemp
+ local LAST_ERR=/tmp/tmp.XasXwbABIu
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ kubectl apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2272/e2e-tests/conf/secrets.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2272/e2e-tests/monitoring-2-0/conf/secrets.yml
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.PfSiareRhl
secret/some-users created
secret/some-users unchanged
+ cat /tmp/tmp.XasXwbABIu
+ rm /tmp/tmp.PfSiareRhl /tmp/tmp.XasXwbABIu
+ return 0
+ yq '.spec.template.spec.volumes[0].secret.secretName="monitoring-ssl"' /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2272/e2e-tests/conf/client_with_tls.yml
+ kubectl_bin apply -f -
++ mktemp
+ local LAST_OUT=/tmp/tmp.Du8b2qnELu
++ mktemp
+ local LAST_ERR=/tmp/tmp.jzo4tAMVcA
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ kubectl apply -f -
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.Du8b2qnELu
deployment.apps/psmdb-client created
+ cat /tmp/tmp.jzo4tAMVcA
+ rm /tmp/tmp.Du8b2qnELu /tmp/tmp.jzo4tAMVcA
+ return 0
+ sleep 90
+ desc 'create first PSMDB cluster monitoring'
+ set +o xtrace
-----------------------------------------------------------------------------------
create first PSMDB cluster monitoring
-----------------------------------------------------------------------------------
+ apply_cluster /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2272/e2e-tests/monitoring-2-0/conf/monitoring-rs0.yml
+ '[' -z '' ']'
+ cat_config /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2272/e2e-tests/monitoring-2-0/conf/monitoring-rs0.yml
+ kubectl_bin apply -f -
+ cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2272/e2e-tests/monitoring-2-0/conf/monitoring-rs0.yml
+ yq eval '(.spec | select(.image == null)).image = "docker.io/perconalab/percona-server-mongodb-operator:main-mongod8.0"'
++ mktemp
+ yq eval '(.spec | select(has("pmm"))).pmm.image = "docker.io/percona/pmm-client:2.44.1-1"'
+ yq eval '(.spec | select(has("initImage"))).initImage = "docker.io/perconalab/percona-server-mongodb-operator:PR-2272-ae4e3cbc"'
+ yq eval '(.spec | select(has("backup"))).backup.image = "docker.io/perconalab/percona-server-mongodb-operator:main-backup"'
+ local LAST_OUT=/tmp/tmp.JNJG8Ea18N
+ yq eval '.spec.upgradeOptions.apply="Never"'
+ /usr/sbin/sed -e s/NAME_SPACE/monitoring-2-0-24852/g
++ mktemp
+ local LAST_ERR=/tmp/tmp.JkZ2Ns3GwC
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ kubectl apply -f -
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.JNJG8Ea18N
perconaservermongodb.psmdb.percona.com/monitoring created
+ cat /tmp/tmp.JkZ2Ns3GwC
+ rm /tmp/tmp.JNJG8Ea18N /tmp/tmp.JkZ2Ns3GwC
+ return 0
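cat_config above is a pipeline of yq rewrites over the CR manifest; in the trace its stages interleave with the kubectl_bin bookkeeping. Untangled, it is roughly (all images and the namespace substitution copied from the trace):

# Fill in test images and pin the upgrade channel before the CR is applied.
cat e2e-tests/monitoring-2-0/conf/monitoring-rs0.yml \
    | yq eval '(.spec | select(.image == null)).image = "docker.io/perconalab/percona-server-mongodb-operator:main-mongod8.0"' \
    | yq eval '(.spec | select(has("pmm"))).pmm.image = "docker.io/percona/pmm-client:2.44.1-1"' \
    | yq eval '(.spec | select(has("initImage"))).initImage = "docker.io/perconalab/percona-server-mongodb-operator:PR-2272-ae4e3cbc"' \
    | yq eval '(.spec | select(has("backup"))).backup.image = "docker.io/perconalab/percona-server-mongodb-operator:main-backup"' \
    | yq eval '.spec.upgradeOptions.apply="Never"' \
    | /usr/sbin/sed -e s/NAME_SPACE/monitoring-2-0-24852/g \
    | kubectl apply -f -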
+ wait_for_running monitoring-rs0 3
+ local name=monitoring-rs0
+ let last_pod=2
+ local check_cluster_readyness=true
+ set_debug
+ [[ 1 == 1 ]]
+ set -o xtrace
+ local rs_name=rs0
+ local cluster_name=monitoring
++ seq 0 2
+ for i in $(seq 0 $last_pod)
+ [[ 0 -eq 2 ]]
+ wait_pod monitoring-rs0-0
+ local pod=monitoring-rs0-0
+ set +o xtrace
waiting for pod/monitoring-rs0-0 to be ready.............OK
+ for i in $(seq 0 $last_pod)
+ [[ 1 -eq 2 ]]
+ wait_pod monitoring-rs0-1
+ local pod=monitoring-rs0-1
+ set +o xtrace
waiting for pod/monitoring-rs0-1 to be ready.............OK
+ for i in $(seq 0 $last_pod)
+ [[ 2 -eq 2 ]]
++ kubectl_bin get psmdb monitoring -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}'
+++ mktemp
++ local LAST_OUT=/tmp/tmp.BJVlZl4a40
+++ mktemp
++ local LAST_ERR=/tmp/tmp.vgNznCxlLY
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in $(seq 0 2)
++ set +e
++ kubectl get psmdb monitoring -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}'
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 -a -n 1 ']'
++ break
++ cat /tmp/tmp.BJVlZl4a40
++ cat /tmp/tmp.vgNznCxlLY
++ rm /tmp/tmp.BJVlZl4a40 /tmp/tmp.vgNznCxlLY
++ return 0
+ [[ '' == \t\r\u\e ]]
+ wait_pod monitoring-rs0-2
+ local pod=monitoring-rs0-2
+ set +o xtrace
waiting for pod/monitoring-rs0-2 to be ready..............OK
++ kubectl_bin get psmdb monitoring -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}'
+++ mktemp
++ local LAST_OUT=/tmp/tmp.iY3k1E9xf8
+++ mktemp
++ local LAST_ERR=/tmp/tmp.MA2bPzFXSp
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in $(seq 0 2)
++ set +e
++ kubectl get psmdb monitoring -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}'
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 -a -n 1 ']'
++ break
++ cat /tmp/tmp.iY3k1E9xf8
++ cat /tmp/tmp.MA2bPzFXSp
++ rm /tmp/tmp.iY3k1E9xf8 /tmp/tmp.MA2bPzFXSp
++ return 0
+ [[ '' == \t\r\u\e ]]
++ kubectl_bin get psmdb monitoring -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}'
+++ mktemp
++ local LAST_OUT=/tmp/tmp.SkT0yENU12
+++ mktemp
++ local LAST_ERR=/tmp/tmp.yIE1d1irir
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in $(seq 0 2)
++ set +e
++ kubectl get psmdb monitoring -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}'
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 -a -n 1 ']'
++ break
++ cat /tmp/tmp.SkT0yENU12
++ cat /tmp/tmp.yIE1d1irir
++ rm /tmp/tmp.SkT0yENU12 /tmp/tmp.yIE1d1irir
++ return 0
+ [[ '' == \t\r\u\e ]]
+ sleep 10
+ [[ true == \t\r\u\e ]]
+ set +x
Waiting for cluster readyness......................
+ desc 'check if pmm-client container is not enabled'
+ set +o xtrace
-----------------------------------------------------------------------------------
check if pmm-client container is not enabled
-----------------------------------------------------------------------------------
+ compare_kubectl statefulset/monitoring-rs0 -no-pmm
+ local resource=statefulset/monitoring-rs0
+ local postfix=-no-pmm
+ local skip_generation_check=
+ local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2272/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-rs0-no-pmm.yml
+ local new_result=/tmp/tmp.FZuz5QQQV1/statefulset_monitoring-rs0.yml
+ '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2272/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-rs0-no-pmm-oc.yml ']'
+ kubectl_bin get -o yaml statefulset/monitoring-rs0
+ yq eval '
  del(.metadata.ownerReferences[].apiVersion) |
  del(.metadata.managedFields) |
  del(.. | select(has("creationTimestamp")).creationTimestamp) |
  del(.. | select(has("namespace")).namespace) |
  del(.. | select(has("uid")).uid) |
  del(.metadata.resourceVersion) |
  del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) |
  del(.metadata.selfLink) |
  del(.metadata.annotations."cloud.google.com/neg") |
  del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") |
  del(.. | select(has("image")).image) |
  del(.. | select(has("clusterIP")).clusterIP) |
  del(.. | select(has("clusterIPs")).clusterIPs) |
  del(.. | select(has("dataSource")).dataSource) |
  del(.. | select(has("procMount")).procMount) |
  del(.. | select(has("storageClassName")).storageClassName) |
  del(.. | select(has("finalizers")).finalizers) |
  del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") |
  del(.. | select(has("volumeName")).volumeName) |
  del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") |
  del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") |
  del(.spec.volumeMode) |
  del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") |
  del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") |
  del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") |
  del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") |
  del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") |
  del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) |
  del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) |
  del(.. | select(has("nodePort")).nodePort) |
  del(.status) |
  (.. | select(tag == "!!str")) |= sub("monitoring-2-0-24852", "NAME_SPACE") |
  del(.spec.volumeClaimTemplates[].apiVersion) |
  del(.spec.volumeClaimTemplates[].kind) |
  del(.spec.ipFamilies) |
  del(.spec.ipFamilyPolicy) |
  (.. | select(. == "extensions/v1beta1")) = "apps/v1" |
  (.. | select(. == "batch/v1beta1")) = "batch/v1"
' -
++ mktemp
+ local LAST_OUT=/tmp/tmp.shlXAJZ7we
++ mktemp
+ local LAST_ERR=/tmp/tmp.wvUDyDfuKV
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ kubectl get -o yaml statefulset/monitoring-rs0
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.shlXAJZ7we
+ cat /tmp/tmp.wvUDyDfuKV
+ rm /tmp/tmp.shlXAJZ7we /tmp/tmp.wvUDyDfuKV
+ return 0
+ yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.FZuz5QQQV1/statefulset_monitoring-rs0.yml
+ version_gt 1.22
++ echo '1.32 >= 1.22'
++ bc -l
+ '[' 1 -eq 1 ']'
+ return 0
+ yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.FZuz5QQQV1/statefulset_monitoring-rs0.yml
+ yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.FZuz5QQQV1/statefulset_monitoring-rs0.yml
+ [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2272/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-rs0-no-pmm.yml == */cronjob* ]]
+ '[' -n '' ']'
+ [[ 0 -eq 0 ]]
+ diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2272/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-rs0-no-pmm.yml /tmp/tmp.FZuz5QQQV1/statefulset_monitoring-rs0.yml
+ log 'compare_kubectl: statefulset/monitoring-rs0 OK'
+ set +o xtrace
[2026-03-26T09:39:01+0000] compare_kubectl: statefulset/monitoring-rs0 OK
+ sleep 10
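compare_kubectl's idea, stripped to its core: fetch the live object, delete every cluster-specific field with yq, then diff against a checked-in golden file. A condensed sketch using only a subset of the deletion list from the trace above (output path shortened for illustration):

# Normalize a live object so it can be diffed against a golden file:
# drop server-populated metadata/status and replace the random namespace.
kubectl get -o yaml statefulset/monitoring-rs0 \
    | yq eval '
        del(.metadata.managedFields) |
        del(.metadata.resourceVersion) |
        del(.. | select(has("uid")).uid) |
        del(.status) |
        (.. | select(tag == "!!str")) |= sub("monitoring-2-0-24852", "NAME_SPACE")
    ' - > /tmp/statefulset_monitoring-rs0.yml
diff -u compare/statefulset_monitoring-rs0-no-pmm.yml /tmp/statefulset_monitoring-rs0.yml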
+ custom_port=27019
+ run_mongos 'db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})' userAdmin:userAdmin123456@monitoring-mongos.monitoring-2-0-24852 '' '' '--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' 27019
+ local 'command=db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})'
+ local uri=userAdmin:userAdmin123456@monitoring-mongos.monitoring-2-0-24852
+ local driver=mongodb
+ local suffix=.svc.cluster.local
+ local 'mongo_flag=--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls'
+ local port=27019
+ local mongo_bin=mongo
++ echo .svc.cluster.local
++ awk -F: '{print $2}'
+ suffix_port=
+ [[ -z '' ]]
+ suffix=.svc.cluster.local:27019
++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}'
+++ mktemp
++ local LAST_OUT=/tmp/tmp.HHTWAWWkrn
+++ mktemp
++ local LAST_ERR=/tmp/tmp.vECRziFCrn
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in $(seq 0 2)
++ set +e
++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}'
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 -a -n 1 ']'
++ break
++ cat /tmp/tmp.HHTWAWWkrn
++ cat /tmp/tmp.vECRziFCrn
++ rm /tmp/tmp.HHTWAWWkrn /tmp/tmp.vECRziFCrn
++ return 0
+ local client_container=psmdb-client-699f458f75-sk2ch
+ kubectl_bin exec psmdb-client-699f458f75-sk2ch -- bash -c 'printf '\''db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})\n'\'' | mongo mongodb://userAdmin:userAdmin123456@monitoring-mongos.monitoring-2-0-24852.svc.cluster.local:27019/admin --tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls'
++ mktemp
+ local LAST_OUT=/tmp/tmp.N4nUIyVfzh
++ mktemp
+ local LAST_ERR=/tmp/tmp.B0te9DJSgD
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ kubectl exec psmdb-client-699f458f75-sk2ch -- bash -c 'printf '\''db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})\n'\'' | mongo mongodb://userAdmin:userAdmin123456@monitoring-mongos.monitoring-2-0-24852.svc.cluster.local:27019/admin --tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls'
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.N4nUIyVfzh
Percona Server for MongoDB shell version v4.4.29-28
connecting to: mongodb://monitoring-mongos.monitoring-2-0-24852.svc.cluster.local:27019/admin?compressors=disabled&gssapiServiceName=mongodb
{"t":{"$date":"2026-03-26T09:39:13.963Z"},"s":"I", "c":"NETWORK", "id":5490002, "ctx":"thread1","msg":"Started a new thread for the timer service"}
Implicit session: session { "id" : UUID("3e25346f-cf64-4442-87c3-f0bdb7288b44") }
Percona Server for MongoDB server version: v8.0.19-7
WARNING: shell and server versions do not match
Successfully added user: { "user" : "myApp", "roles" : [ { "db" : "myApp", "role" : "readWrite" } ] }
bye
+ cat /tmp/tmp.B0te9DJSgD
+ rm /tmp/tmp.N4nUIyVfzh /tmp/tmp.B0te9DJSgD
+ return 0
+ run_mongos 'sh.enableSharding("myApp")' clusterAdmin:clusterAdmin123456@monitoring-mongos.monitoring-2-0-24852 '' '' '--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' 27019
+ local 'command=sh.enableSharding("myApp")'
+ local uri=clusterAdmin:clusterAdmin123456@monitoring-mongos.monitoring-2-0-24852
+ local driver=mongodb
+ local suffix=.svc.cluster.local
+ local 'mongo_flag=--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls'
+ local port=27019
+ local mongo_bin=mongo
++ echo .svc.cluster.local
++ awk -F: '{print $2}'
+ suffix_port=
+ [[ -z '' ]]
+ suffix=.svc.cluster.local:27019
++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}'
+++ mktemp
++ local LAST_OUT=/tmp/tmp.gOrxM1diQt
+++ mktemp
++ local LAST_ERR=/tmp/tmp.W1mXd5Fr6l
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in $(seq 0 2)
++ set +e
++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}'
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 -a -n 1 ']'
++ break
++ cat /tmp/tmp.gOrxM1diQt
++ cat /tmp/tmp.W1mXd5Fr6l
++ rm /tmp/tmp.gOrxM1diQt /tmp/tmp.W1mXd5Fr6l
++ return 0
+ local client_container=psmdb-client-699f458f75-sk2ch
+ kubectl_bin exec psmdb-client-699f458f75-sk2ch -- bash -c 'printf '\''sh.enableSharding("myApp")\n'\'' | mongo mongodb://clusterAdmin:clusterAdmin123456@monitoring-mongos.monitoring-2-0-24852.svc.cluster.local:27019/admin --tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls'
++ mktemp
+ local LAST_OUT=/tmp/tmp.pZGbiPmtVZ
++ mktemp
+ local LAST_ERR=/tmp/tmp.8OveKU2UW8
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ kubectl exec psmdb-client-699f458f75-sk2ch -- bash -c 'printf '\''sh.enableSharding("myApp")\n'\'' | mongo mongodb://clusterAdmin:clusterAdmin123456@monitoring-mongos.monitoring-2-0-24852.svc.cluster.local:27019/admin --tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls'
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.pZGbiPmtVZ
Percona Server for MongoDB shell version v4.4.29-28
connecting to: mongodb://monitoring-mongos.monitoring-2-0-24852.svc.cluster.local:27019/admin?compressors=disabled&gssapiServiceName=mongodb
{"t":{"$date":"2026-03-26T09:39:16.581Z"},"s":"I", "c":"NETWORK", "id":5490002, "ctx":"thread1","msg":"Started a new thread for the timer service"}
Implicit session: session { "id" : UUID("58a80ebd-aee7-4778-9076-e244a759ab4d") }
Percona Server for MongoDB server version: v8.0.19-7
WARNING: shell and server versions do not match
{ "ok" : 1, "$clusterTime" : { "clusterTime" : Timestamp(1774517956, 9), "signature" : { "hash" : BinData(0,"lyHVnxViPpVfahZN1s29y+Lq5UY="), "keyId" : NumberLong("7621496196342743064") } }, "operationTime" : Timestamp(1774517956, 6) }
bye
+ cat /tmp/tmp.8OveKU2UW8
+ rm /tmp/tmp.pZGbiPmtVZ /tmp/tmp.8OveKU2UW8
+ return 0
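Every mongo call in this test takes the same route: look up the psmdb-client pod, then kubectl exec a one-liner into the mongo shell with the TLS material mounted inside that pod. Condensed from the expansions above (sh.status() is just an example command, not one from this run):

# Pipe a single command into mongo inside the client pod, over TLS.
pod=$(kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}')
kubectl exec "$pod" -- bash -c \
    'printf '\''sh.status()\n'\'' | mongo mongodb://clusterAdmin:clusterAdmin123456@monitoring-mongos.monitoring-2-0-24852.svc.cluster.local:27019/admin --tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls'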
+ insert_data_mongos 100500 myApp '--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' 27019
+ local data=100500
+ local db_name=myApp
+ local 'flags=--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls'
+ local port=27019
+ run_mongos 'use myApp\n db.test.insert({ x: 100500 })' myApp:myPass@monitoring-mongos.monitoring-2-0-24852 '' '' '--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' 27019
+ local 'command=use myApp\n db.test.insert({ x: 100500 })'
+ local uri=myApp:myPass@monitoring-mongos.monitoring-2-0-24852
+ local driver=mongodb
+ local suffix=.svc.cluster.local
+ local 'mongo_flag=--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls'
+ local port=27019
+ local mongo_bin=mongo
++ awk -F: '{print $2}'
++ echo .svc.cluster.local
+ suffix_port=
+ [[ -z '' ]]
+ suffix=.svc.cluster.local:27019
++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}'
+++ mktemp
++ local LAST_OUT=/tmp/tmp.62u7XU7aRv
+++ mktemp
++ local LAST_ERR=/tmp/tmp.P95cgINyy9
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in $(seq 0 2)
++ set +e
++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}'
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 -a -n 1 ']'
++ break
++ cat /tmp/tmp.62u7XU7aRv
++ cat /tmp/tmp.P95cgINyy9
++ rm /tmp/tmp.62u7XU7aRv /tmp/tmp.P95cgINyy9
++ return 0
+ local client_container=psmdb-client-699f458f75-sk2ch
+ kubectl_bin exec psmdb-client-699f458f75-sk2ch -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb://myApp:myPass@monitoring-mongos.monitoring-2-0-24852.svc.cluster.local:27019/admin --tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls'
++ mktemp
+ local LAST_OUT=/tmp/tmp.TFFISh9PNU
++ mktemp
+ local LAST_ERR=/tmp/tmp.Zxe25Rr4NM
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ kubectl exec psmdb-client-699f458f75-sk2ch -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb://myApp:myPass@monitoring-mongos.monitoring-2-0-24852.svc.cluster.local:27019/admin --tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls'
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.TFFISh9PNU
Percona Server for MongoDB shell version v4.4.29-28
connecting to: mongodb://monitoring-mongos.monitoring-2-0-24852.svc.cluster.local:27019/admin?compressors=disabled&gssapiServiceName=mongodb
{"t":{"$date":"2026-03-26T09:39:19.397Z"},"s":"I", "c":"NETWORK", "id":5490002, "ctx":"thread1","msg":"Started a new thread for the timer service"}
Implicit session: session { "id" : UUID("5d81f1ea-3d3f-46f9-bb48-18ddddcd1ee8") }
Percona Server for MongoDB server version: v8.0.19-7
WARNING: shell and server versions do not match
switched to db myApp
WriteResult({ "nInserted" : 1 })
bye
+ cat /tmp/tmp.Zxe25Rr4NM
+ rm /tmp/tmp.TFFISh9PNU /tmp/tmp.Zxe25Rr4NM
+ return 0
+ insert_data_mongos 100600 myApp '--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' 27019
+ local data=100600
+ local db_name=myApp
+ local 'flags=--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls'
+ local port=27019
+ run_mongos 'use myApp\n db.test.insert({ x: 100600 })' myApp:myPass@monitoring-mongos.monitoring-2-0-24852 '' '' '--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' 27019
+ local 'command=use myApp\n db.test.insert({ x: 100600 })'
+ local uri=myApp:myPass@monitoring-mongos.monitoring-2-0-24852
+ local driver=mongodb
+ local suffix=.svc.cluster.local
+ local 'mongo_flag=--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls'
+ local port=27019
+ local mongo_bin=mongo
++ echo .svc.cluster.local
++ awk -F: '{print $2}'
+ suffix_port=
+ [[ -z '' ]]
+ suffix=.svc.cluster.local:27019
++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}'
+++ mktemp
++ local LAST_OUT=/tmp/tmp.PZ0ZRgge7u
+++ mktemp
++ local LAST_ERR=/tmp/tmp.nkyW8PUUf0
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in $(seq 0 2)
++ set +e
++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}'
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 -a -n 1 ']'
++ break
++ cat /tmp/tmp.PZ0ZRgge7u
++ cat /tmp/tmp.nkyW8PUUf0
++ rm /tmp/tmp.PZ0ZRgge7u /tmp/tmp.nkyW8PUUf0
++ return 0
+ local client_container=psmdb-client-699f458f75-sk2ch
+ kubectl_bin exec psmdb-client-699f458f75-sk2ch -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100600 })\n'\'' | mongo mongodb://myApp:myPass@monitoring-mongos.monitoring-2-0-24852.svc.cluster.local:27019/admin --tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls'
++ mktemp
+ local LAST_OUT=/tmp/tmp.RAakiSXBmQ
++ mktemp
+ local LAST_ERR=/tmp/tmp.5Rc0cAD6gJ
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ kubectl exec psmdb-client-699f458f75-sk2ch -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100600 })\n'\'' | mongo mongodb://myApp:myPass@monitoring-mongos.monitoring-2-0-24852.svc.cluster.local:27019/admin --tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls'
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.RAakiSXBmQ
Percona Server for MongoDB shell version v4.4.29-28
connecting to: mongodb://monitoring-mongos.monitoring-2-0-24852.svc.cluster.local:27019/admin?compressors=disabled&gssapiServiceName=mongodb
{"t":{"$date":"2026-03-26T09:39:22.371Z"},"s":"I", "c":"NETWORK", "id":5490002, "ctx":"thread1","msg":"Started a new thread for the timer service"}
Implicit session: session { "id" : UUID("d04fa46a-3267-4c66-91c8-506df8e9d5fa") }
Percona Server for MongoDB server version: v8.0.19-7
WARNING: shell and server versions do not match
switched to db myApp
WriteResult({ "nInserted" : 1 })
bye
+ cat /tmp/tmp.5Rc0cAD6gJ
+ rm /tmp/tmp.RAakiSXBmQ /tmp/tmp.5Rc0cAD6gJ
+ return 0
+ insert_data_mongos 100700 myApp '--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' 27019
+ local data=100700
+ local db_name=myApp
+ local 'flags=--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls'
+ local port=27019
+ run_mongos 'use myApp\n db.test.insert({ x: 100700 })' myApp:myPass@monitoring-mongos.monitoring-2-0-24852 '' '' '--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' 27019
+ local 'command=use myApp\n db.test.insert({ x: 100700 })'
+ local uri=myApp:myPass@monitoring-mongos.monitoring-2-0-24852
+ local driver=mongodb
+ local suffix=.svc.cluster.local
+ local 'mongo_flag=--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls'
+ local port=27019
+ local mongo_bin=mongo
++ echo .svc.cluster.local
++ awk -F: '{print $2}'
+ suffix_port=
+ [[ -z '' ]]
+ suffix=.svc.cluster.local:27019
++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}'
+++ mktemp
++ local LAST_OUT=/tmp/tmp.Kc8A4Au7DX
+++ mktemp
++ local LAST_ERR=/tmp/tmp.0S1lyEHMLD
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in $(seq 0 2)
++ set +e
++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}'
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 -a -n 1 ']'
++ break
++ cat /tmp/tmp.Kc8A4Au7DX
++ cat /tmp/tmp.0S1lyEHMLD
++ rm /tmp/tmp.Kc8A4Au7DX /tmp/tmp.0S1lyEHMLD
++ return 0
+ local client_container=psmdb-client-699f458f75-sk2ch
+ kubectl_bin exec psmdb-client-699f458f75-sk2ch -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100700 })\n'\'' | mongo mongodb://myApp:myPass@monitoring-mongos.monitoring-2-0-24852.svc.cluster.local:27019/admin --tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls'
++ mktemp
+ local LAST_OUT=/tmp/tmp.LzpySXDlWj
++ mktemp
+ local LAST_ERR=/tmp/tmp.Lu7Oxrrm47
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ kubectl exec psmdb-client-699f458f75-sk2ch -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100700 })\n'\'' | mongo mongodb://myApp:myPass@monitoring-mongos.monitoring-2-0-24852.svc.cluster.local:27019/admin --tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls'
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.LzpySXDlWj
Percona Server for MongoDB shell version v4.4.29-28
connecting to: mongodb://monitoring-mongos.monitoring-2-0-24852.svc.cluster.local:27019/admin?compressors=disabled&gssapiServiceName=mongodb
{"t":{"$date":"2026-03-26T09:39:25.034Z"},"s":"I", "c":"NETWORK", "id":5490002, "ctx":"thread1","msg":"Started a new thread for the timer service"}
Implicit session: session { "id" : UUID("e2c99c8d-e590-4f51-9863-93660fa7a153") }
Percona Server for MongoDB server version: v8.0.19-7
WARNING: shell and server versions do not match
switched to db myApp
WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.Lu7Oxrrm47 + rm /tmp/tmp.LzpySXDlWj /tmp/tmp.Lu7Oxrrm47 + return 0 + desc 'add PMM_SERVER_API_KEY for secret some-users' + set +o xtrace ----------------------------------------------------------------------------------- add PMM_SERVER_API_KEY for secret some-users ----------------------------------------------------------------------------------- ++ jq .key +++ get_service_endpoint monitoring-service +++ local service=monitoring-service ++++ kubectl_bin get service/monitoring-service -o json ++++ jq '.status.loadBalancer.ingress[].hostname' ++++ sed -e 's/^"//; s/"$//;' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.mFIpEcHARa +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.HBojctOyKg ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o json ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.mFIpEcHARa ++++ cat /tmp/tmp.HBojctOyKg ++++ rm /tmp/tmp.mFIpEcHARa /tmp/tmp.HBojctOyKg ++++ return 0 +++ local hostname=null +++ '[' -n null -a null '!=' null ']' ++++ kubectl_bin get service/monitoring-service -o json ++++ jq '.status.loadBalancer.ingress[].ip' ++++ sed -e 's/^"//; s/"$//;' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.AluVwYmKvS +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.RAaKI3BZyI ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o json ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.AluVwYmKvS ++++ cat /tmp/tmp.RAaKI3BZyI ++++ rm /tmp/tmp.AluVwYmKvS /tmp/tmp.RAaKI3BZyI ++++ return 0 +++ local ip=34.30.40.245 +++ '[' -n 34.30.40.245 -a 34.30.40.245 '!=' null ']' +++ echo 34.30.40.245 +++ return ++ curl --insecure -X POST -H 'Content-Type: application/json' -d '{"name":"operator", "role": "Admin"}' https://admin:admin@34.30.40.245/graph/api/auth/keys % Total % Received % Xferd Average Speed Time Time Time Current Dload Upload Total Spent Left Speed 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0 100 36 0 0 100 36 0 103 --:--:-- --:--:-- --:--:-- 103 100 155 100 119 100 36 295 89 --:--:-- --:--:-- --:--:-- 384 + API_KEY='"eyJrIjoiSHc4NjdtMHNSbmhqMGVISGcwYTNZRkVlWUxOUGJZSGEiLCJuIjoib3BlcmF0b3IiLCJpZCI6MX0="' + kubectl_bin patch secret some-users --type merge --patch '{"stringData": {"PMM_SERVER_API_KEY": "eyJrIjoiSHc4NjdtMHNSbmhqMGVISGcwYTNZRkVlWUxOUGJZSGEiLCJuIjoib3BlcmF0b3IiLCJpZCI6MX0="}}' ++ mktemp + local LAST_OUT=/tmp/tmp.BaKMKtsUcb ++ mktemp + local LAST_ERR=/tmp/tmp.EK55erFhlx + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl patch secret some-users --type merge --patch '{"stringData": {"PMM_SERVER_API_KEY": "eyJrIjoiSHc4NjdtMHNSbmhqMGVISGcwYTNZRkVlWUxOUGJZSGEiLCJuIjoib3BlcmF0b3IiLCJpZCI6MX0="}}' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.BaKMKtsUcb secret/some-users patched + cat /tmp/tmp.EK55erFhlx + rm /tmp/tmp.BaKMKtsUcb /tmp/tmp.EK55erFhlx + return 0 + desc 'check if all 3 Pods started' + set +o xtrace ----------------------------------------------------------------------------------- check if all 3 Pods started ----------------------------------------------------------------------------------- + wait_for_running monitoring-rs0 3 + local name=monitoring-rs0 + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=monitoring ++ 
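[Editor's note] The PMM_SERVER_API_KEY step above boils down to three commands: resolve the monitoring-service LoadBalancer address, create a Grafana admin API key on the PMM server, and store it in the some-users Secret. A condensed sketch; the endpoint and admin:admin credentials are this run's values, and jq -r is used here instead of the trace's quote-preserving jq .key:

    # Create a PMM/Grafana API key and patch it into the users Secret.
    endpoint=$(kubectl get service/monitoring-service -o json \
        | jq -r '.status.loadBalancer.ingress[0].ip')
    api_key=$(curl --insecure -s -X POST -H 'Content-Type: application/json' \
        -d '{"name":"operator", "role": "Admin"}' \
        "https://admin:admin@${endpoint}/graph/api/auth/keys" | jq -r '.key')
    kubectl patch secret some-users --type merge \
        --patch "{\"stringData\": {\"PMM_SERVER_API_KEY\": \"${api_key}\"}}"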
seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod monitoring-rs0-0 + local pod=monitoring-rs0-0 + set +o xtrace waiting for pod/monitoring-rs0-0 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod monitoring-rs0-1 + local pod=monitoring-rs0-1 + set +o xtrace waiting for pod/monitoring-rs0-1 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb monitoring -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Yin1vP6IfI +++ mktemp ++ local LAST_ERR=/tmp/tmp.1iljsXD6q2 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb monitoring -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Yin1vP6IfI ++ cat /tmp/tmp.1iljsXD6q2 ++ rm /tmp/tmp.Yin1vP6IfI /tmp/tmp.1iljsXD6q2 ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod monitoring-rs0-2 + local pod=monitoring-rs0-2 + set +o xtrace waiting for pod/monitoring-rs0-2 to be ready.OK ++ kubectl_bin get psmdb monitoring -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.jCJRtCaCRq +++ mktemp ++ local LAST_ERR=/tmp/tmp.QaEPkHf8kC ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb monitoring -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.jCJRtCaCRq ++ cat /tmp/tmp.QaEPkHf8kC ++ rm /tmp/tmp.jCJRtCaCRq /tmp/tmp.QaEPkHf8kC ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb monitoring -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.sxMhGMdiwu +++ mktemp ++ local LAST_ERR=/tmp/tmp.pKYWGTZTU8 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb monitoring -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.sxMhGMdiwu ++ cat /tmp/tmp.pKYWGTZTU8 ++ rm /tmp/tmp.sxMhGMdiwu /tmp/tmp.pKYWGTZTU8 ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness........................................................................................................................................................ + sleep 90 + desc 'check if pmm-client container enabled' + set +o xtrace ----------------------------------------------------------------------------------- check if pmm-client container enabled ----------------------------------------------------------------------------------- + compare_kubectl statefulset/monitoring-rs0 + local resource=statefulset/monitoring-rs0 + local postfix= + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2272/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-rs0.yml + local new_result=/tmp/tmp.FZuz5QQQV1/statefulset_monitoring-rs0.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2272/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-rs0-oc.yml ']' + kubectl_bin get -o yaml statefulset/monitoring-rs0 + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. 
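[Editor's note] wait_for_running above loops wait_pod over the three rs0 pods and then inspects the psmdb spec for arbiter/nonvoting/hidden members; the wait_pod implementation itself is not visible in this log. A hypothetical equivalent using kubectl's built-in readiness wait:

    # Assumed stand-in for wait_pod: block until each replica-set pod is Ready.
    for i in 0 1 2; do
        kubectl wait --for=condition=Ready "pod/monitoring-rs0-${i}" --timeout=300s
    done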
| select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("monitoring-2-0-24852", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. 
== "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.vR1maClKSX ++ mktemp + local LAST_ERR=/tmp/tmp.jaIRw5eJ5x + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get -o yaml statefulset/monitoring-rs0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.vR1maClKSX + cat /tmp/tmp.jaIRw5eJ5x + rm /tmp/tmp.vR1maClKSX /tmp/tmp.jaIRw5eJ5x + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.FZuz5QQQV1/statefulset_monitoring-rs0.yml + version_gt 1.22 ++ echo '1.32 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.FZuz5QQQV1/statefulset_monitoring-rs0.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.FZuz5QQQV1/statefulset_monitoring-rs0.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2272/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-rs0.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2272/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-rs0.yml /tmp/tmp.FZuz5QQQV1/statefulset_monitoring-rs0.yml + log 'compare_kubectl: statefulset/monitoring-rs0 OK' + set +o xtrace [2026-03-26T09:46:22+0000] compare_kubectl: statefulset/monitoring-rs0 OK + compare_kubectl service/monitoring-rs0 + local resource=service/monitoring-rs0 + local postfix= + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2272/e2e-tests/monitoring-2-0/compare/service_monitoring-rs0.yml + local new_result=/tmp/tmp.FZuz5QQQV1/service_monitoring-rs0.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2272/e2e-tests/monitoring-2-0/compare/service_monitoring-rs0-oc.yml ']' + kubectl_bin get -o yaml service/monitoring-rs0 ++ mktemp + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. 
| select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("monitoring-2-0-24852", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. == "batch/v1beta1")) = "batch/v1" ' - + local LAST_OUT=/tmp/tmp.69lJDS7V10 ++ mktemp + local LAST_ERR=/tmp/tmp.4N8qu0HZqL + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get -o yaml service/monitoring-rs0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.69lJDS7V10 + cat /tmp/tmp.4N8qu0HZqL + rm /tmp/tmp.69lJDS7V10 /tmp/tmp.4N8qu0HZqL + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.FZuz5QQQV1/service_monitoring-rs0.yml + version_gt 1.22 ++ echo '1.32 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.FZuz5QQQV1/service_monitoring-rs0.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.FZuz5QQQV1/service_monitoring-rs0.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2272/e2e-tests/monitoring-2-0/compare/service_monitoring-rs0.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2272/e2e-tests/monitoring-2-0/compare/service_monitoring-rs0.yml /tmp/tmp.FZuz5QQQV1/service_monitoring-rs0.yml + log 'compare_kubectl: service/monitoring-rs0 OK' + set +o xtrace [2026-03-26T09:46:23+0000] compare_kubectl: service/monitoring-rs0 OK + compare_kubectl service/monitoring-mongos + local resource=service/monitoring-mongos + local postfix= + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2272/e2e-tests/monitoring-2-0/compare/service_monitoring-mongos.yml + local new_result=/tmp/tmp.FZuz5QQQV1/service_monitoring-mongos.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2272/e2e-tests/monitoring-2-0/compare/service_monitoring-mongos-oc.yml ']' + kubectl_bin get -o yaml service/monitoring-mongos + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. 
| select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("monitoring-2-0-24852", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. == "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.G7sT4ZEtF6 ++ mktemp + local LAST_ERR=/tmp/tmp.QIN40nhCOu + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get -o yaml service/monitoring-mongos + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.G7sT4ZEtF6 + cat /tmp/tmp.QIN40nhCOu + rm /tmp/tmp.G7sT4ZEtF6 /tmp/tmp.QIN40nhCOu + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.FZuz5QQQV1/service_monitoring-mongos.yml + version_gt 1.22 ++ echo '1.32 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.FZuz5QQQV1/service_monitoring-mongos.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.FZuz5QQQV1/service_monitoring-mongos.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2272/e2e-tests/monitoring-2-0/compare/service_monitoring-mongos.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2272/e2e-tests/monitoring-2-0/compare/service_monitoring-mongos.yml /tmp/tmp.FZuz5QQQV1/service_monitoring-mongos.yml + log 'compare_kubectl: service/monitoring-mongos OK' + set +o xtrace [2026-03-26T09:46:24+0000] compare_kubectl: service/monitoring-mongos OK + compare_kubectl statefulset/monitoring-cfg + local resource=statefulset/monitoring-cfg + local postfix= + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2272/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-cfg.yml + local new_result=/tmp/tmp.FZuz5QQQV1/statefulset_monitoring-cfg.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2272/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-cfg-oc.yml ']' + kubectl_bin get -o yaml statefulset/monitoring-cfg ++ mktemp + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. 
| select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("monitoring-2-0-24852", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. == "batch/v1beta1")) = "batch/v1" ' - + local LAST_OUT=/tmp/tmp.BXqpUgN0Vj ++ mktemp + local LAST_ERR=/tmp/tmp.JLYM9DKNwO + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get -o yaml statefulset/monitoring-cfg + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.BXqpUgN0Vj + cat /tmp/tmp.JLYM9DKNwO + rm /tmp/tmp.BXqpUgN0Vj /tmp/tmp.JLYM9DKNwO + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.FZuz5QQQV1/statefulset_monitoring-cfg.yml + version_gt 1.22 ++ echo '1.32 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.FZuz5QQQV1/statefulset_monitoring-cfg.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.FZuz5QQQV1/statefulset_monitoring-cfg.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2272/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-cfg.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2272/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-cfg.yml /tmp/tmp.FZuz5QQQV1/statefulset_monitoring-cfg.yml + log 'compare_kubectl: statefulset/monitoring-cfg OK' + set +o xtrace [2026-03-26T09:46:25+0000] compare_kubectl: statefulset/monitoring-cfg OK + compare_kubectl statefulset/monitoring-mongos + local resource=statefulset/monitoring-mongos + local postfix= + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2272/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-mongos.yml + local new_result=/tmp/tmp.FZuz5QQQV1/statefulset_monitoring-mongos.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2272/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-mongos-oc.yml ']' + kubectl_bin get -o yaml statefulset/monitoring-mongos + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. 
| select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("monitoring-2-0-24852", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. 
== "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.d0kTJgqjOq ++ mktemp + local LAST_ERR=/tmp/tmp.UvCAopccmh + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get -o yaml statefulset/monitoring-mongos + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.d0kTJgqjOq + cat /tmp/tmp.UvCAopccmh + rm /tmp/tmp.d0kTJgqjOq /tmp/tmp.UvCAopccmh + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.FZuz5QQQV1/statefulset_monitoring-mongos.yml + version_gt 1.22 ++ echo '1.32 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.FZuz5QQQV1/statefulset_monitoring-mongos.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.FZuz5QQQV1/statefulset_monitoring-mongos.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2272/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-mongos.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2272/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-mongos.yml /tmp/tmp.FZuz5QQQV1/statefulset_monitoring-mongos.yml + log 'compare_kubectl: statefulset/monitoring-mongos OK' + set +o xtrace [2026-03-26T09:46:26+0000] compare_kubectl: statefulset/monitoring-mongos OK + desc 'check mongod metrics' + set +o xtrace ----------------------------------------------------------------------------------- check mongod metrics ----------------------------------------------------------------------------------- + get_metric_values node_boot_time_seconds monitoring-2-0-24852-monitoring-rs0-1 admin:admin + local metric=node_boot_time_seconds + local instance=monitoring-2-0-24852-monitoring-rs0-1 + local user_pass=admin:admin ++ /usr/sbin/date -u +%s -d '-1 minute' + local start=1774518326 ++ /usr/sbin/date -u +%s + local end=1774518386 ++ get_service_endpoint monitoring-service ++ local service=monitoring-service +++ kubectl_bin get service/monitoring-service -o json +++ jq '.status.loadBalancer.ingress[].hostname' ++++ mktemp +++ sed -e 's/^"//; s/"$//;' +++ local LAST_OUT=/tmp/tmp.aC04fTh7NM ++++ mktemp +++ local LAST_ERR=/tmp/tmp.PwKLvx9WzR +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.aC04fTh7NM +++ cat /tmp/tmp.PwKLvx9WzR +++ rm /tmp/tmp.aC04fTh7NM /tmp/tmp.PwKLvx9WzR +++ return 0 ++ local hostname=null ++ '[' -n null -a null '!=' null ']' +++ kubectl_bin get service/monitoring-service -o json +++ jq '.status.loadBalancer.ingress[].ip' +++ sed -e 's/^"//; s/"$//;' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.B287qwxW71 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.KA1kMHTRWX +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.B287qwxW71 +++ cat /tmp/tmp.KA1kMHTRWX +++ rm /tmp/tmp.B287qwxW71 /tmp/tmp.KA1kMHTRWX +++ return 0 ++ local ip=34.30.40.245 ++ '[' -n 34.30.40.245 -a 34.30.40.245 '!=' null ']' ++ echo 34.30.40.245 ++ return + local endpoint=34.30.40.245 + grep '^"[0-9]' + jq '.data.result[0].values[][1]' + curl -s -k 
'https://admin:admin@34.30.40.245/graph/api/datasources/proxy/1/api/v1/query_range?query=min%28node_boot_time_seconds%7Bnode_name%3D%7E%22monitoring-2-0-24852-monitoring-rs0-1%22%7d%20or%20node_boot_time_seconds%7Bnode_name%3D%7E%22monitoring-2-0-24852-monitoring-rs0-1%22%7D%29&start=1774518326&end=1774518386&step=60' "1774511866" "1774511866" + get_metric_values mongodb_connections monitoring-2-0-24852-monitoring-rs0-1 admin:admin + local metric=mongodb_connections + local instance=monitoring-2-0-24852-monitoring-rs0-1 + local user_pass=admin:admin ++ /usr/sbin/date -u +%s -d '-1 minute' + local start=1774518328 ++ /usr/sbin/date -u +%s + local end=1774518388 ++ get_service_endpoint monitoring-service ++ local service=monitoring-service +++ kubectl_bin get service/monitoring-service -o json +++ jq '.status.loadBalancer.ingress[].hostname' +++ sed -e 's/^"//; s/"$//;' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.xwctEwUXEQ ++++ mktemp +++ local LAST_ERR=/tmp/tmp.nEpux5gA06 +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.xwctEwUXEQ +++ cat /tmp/tmp.nEpux5gA06 +++ rm /tmp/tmp.xwctEwUXEQ /tmp/tmp.nEpux5gA06 +++ return 0 ++ local hostname=null ++ '[' -n null -a null '!=' null ']' +++ kubectl_bin get service/monitoring-service -o json +++ jq '.status.loadBalancer.ingress[].ip' +++ sed -e 's/^"//; s/"$//;' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.rhd0taNdA8 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.Qnp9ialnNU +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.rhd0taNdA8 +++ cat /tmp/tmp.Qnp9ialnNU +++ rm /tmp/tmp.rhd0taNdA8 /tmp/tmp.Qnp9ialnNU +++ return 0 ++ local ip=34.30.40.245 ++ '[' -n 34.30.40.245 -a 34.30.40.245 '!=' null ']' ++ echo 34.30.40.245 ++ return + local endpoint=34.30.40.245 + curl -s -k 'https://admin:admin@34.30.40.245/graph/api/datasources/proxy/1/api/v1/query_range?query=min%28mongodb_connections%7Bnode_name%3D%7E%22monitoring-2-0-24852-monitoring-rs0-1%22%7d%20or%20mongodb_connections%7Bnode_name%3D%7E%22monitoring-2-0-24852-monitoring-rs0-1%22%7D%29&start=1774518328&end=1774518388&step=60' + jq '.data.result[0].values[][1]' + grep '^"[0-9]' "0" "0" + desc 'check mongo config metrics' + set +o xtrace ----------------------------------------------------------------------------------- check mongo config metrics ----------------------------------------------------------------------------------- + get_metric_values node_boot_time_seconds monitoring-2-0-24852-monitoring-cfg-1 admin:admin + local metric=node_boot_time_seconds + local instance=monitoring-2-0-24852-monitoring-cfg-1 + local user_pass=admin:admin ++ /usr/sbin/date -u +%s -d '-1 minute' + local start=1774518331 ++ /usr/sbin/date -u +%s + local end=1774518391 ++ get_service_endpoint monitoring-service ++ local service=monitoring-service +++ kubectl_bin get service/monitoring-service -o json +++ jq '.status.loadBalancer.ingress[].hostname' ++++ mktemp +++ sed -e 's/^"//; s/"$//;' +++ local LAST_OUT=/tmp/tmp.FLDAiQd65G ++++ mktemp +++ local LAST_ERR=/tmp/tmp.Ol5x2TdJuY +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break 
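[Editor's note] Each get_metric_values call resolves the monitoring-service endpoint, queries PMM's Grafana datasource proxy with a Prometheus query_range over the last minute, and greps for numeric samples. A condensed sketch with this run's endpoint; the URL-encoded min(... or ...) wrapper seen in the trace is simplified to a bare selector here:

    # Pull one metric for one instance over the last minute from PMM.
    endpoint=34.30.40.245
    metric=node_boot_time_seconds
    instance=monitoring-2-0-24852-monitoring-rs0-1
    start=$(date -u +%s -d '-1 minute')
    end=$(date -u +%s)
    curl -s -k "https://admin:admin@${endpoint}/graph/api/datasources/proxy/1/api/v1/query_range?query=${metric}%7Bnode_name%3D%7E%22${instance}%22%7D&start=${start}&end=${end}&step=60" \
        | jq '.data.result[0].values[][1]' \
        | grep '^"[0-9]'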
+++ cat /tmp/tmp.FLDAiQd65G +++ cat /tmp/tmp.Ol5x2TdJuY +++ rm /tmp/tmp.FLDAiQd65G /tmp/tmp.Ol5x2TdJuY +++ return 0 ++ local hostname=null ++ '[' -n null -a null '!=' null ']' +++ kubectl_bin get service/monitoring-service -o json +++ sed -e 's/^"//; s/"$//;' +++ jq '.status.loadBalancer.ingress[].ip' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.4WWpJg26wL ++++ mktemp +++ local LAST_ERR=/tmp/tmp.izvyydQR9C +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.4WWpJg26wL +++ cat /tmp/tmp.izvyydQR9C +++ rm /tmp/tmp.4WWpJg26wL /tmp/tmp.izvyydQR9C +++ return 0 ++ local ip=34.30.40.245 ++ '[' -n 34.30.40.245 -a 34.30.40.245 '!=' null ']' ++ echo 34.30.40.245 ++ return + local endpoint=34.30.40.245 + curl -s -k 'https://admin:admin@34.30.40.245/graph/api/datasources/proxy/1/api/v1/query_range?query=min%28node_boot_time_seconds%7Bnode_name%3D%7E%22monitoring-2-0-24852-monitoring-cfg-1%22%7d%20or%20node_boot_time_seconds%7Bnode_name%3D%7E%22monitoring-2-0-24852-monitoring-cfg-1%22%7D%29&start=1774518331&end=1774518391&step=60' + jq '.data.result[0].values[][1]' + grep '^"[0-9]' "1774511869" "1774511869" + get_metric_values mongodb_connections monitoring-2-0-24852-monitoring-cfg-1 admin:admin + local metric=mongodb_connections + local instance=monitoring-2-0-24852-monitoring-cfg-1 + local user_pass=admin:admin ++ /usr/sbin/date -u +%s -d '-1 minute' + local start=1774518333 ++ /usr/sbin/date -u +%s + local end=1774518393 ++ get_service_endpoint monitoring-service ++ local service=monitoring-service +++ kubectl_bin get service/monitoring-service -o json +++ sed -e 's/^"//; s/"$//;' ++++ mktemp +++ jq '.status.loadBalancer.ingress[].hostname' +++ local LAST_OUT=/tmp/tmp.cK9KmIJYgt ++++ mktemp +++ local LAST_ERR=/tmp/tmp.vOZdV1LIbP +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.cK9KmIJYgt +++ cat /tmp/tmp.vOZdV1LIbP +++ rm /tmp/tmp.cK9KmIJYgt /tmp/tmp.vOZdV1LIbP +++ return 0 ++ local hostname=null ++ '[' -n null -a null '!=' null ']' +++ kubectl_bin get service/monitoring-service -o json +++ jq '.status.loadBalancer.ingress[].ip' +++ sed -e 's/^"//; s/"$//;' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.yUR5Z14cZ9 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.Pt7dhL8n3x +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.yUR5Z14cZ9 +++ cat /tmp/tmp.Pt7dhL8n3x +++ rm /tmp/tmp.yUR5Z14cZ9 /tmp/tmp.Pt7dhL8n3x +++ return 0 ++ local ip=34.30.40.245 ++ '[' -n 34.30.40.245 -a 34.30.40.245 '!=' null ']' ++ echo 34.30.40.245 ++ return + local endpoint=34.30.40.245 + curl -s -k 'https://admin:admin@34.30.40.245/graph/api/datasources/proxy/1/api/v1/query_range?query=min%28mongodb_connections%7Bnode_name%3D%7E%22monitoring-2-0-24852-monitoring-cfg-1%22%7d%20or%20mongodb_connections%7Bnode_name%3D%7E%22monitoring-2-0-24852-monitoring-cfg-1%22%7D%29&start=1774518333&end=1774518393&step=60' + jq '.data.result[0].values[][1]' + grep '^"[0-9]' "0" "0" + desc 'check mongos metrics' + set +o xtrace ----------------------------------------------------------------------------------- check mongos metrics 
----------------------------------------------------------------------------------- ++ kubectl get pod -l app.kubernetes.io/component=mongos -o 'jsonpath={.items[0].metadata.name}' + MONGOS_POD_NAME=monitoring-mongos-0 + get_metric_values node_boot_time_seconds monitoring-2-0-24852-monitoring-mongos-0 admin:admin + local metric=node_boot_time_seconds + local instance=monitoring-2-0-24852-monitoring-mongos-0 + local user_pass=admin:admin ++ /usr/sbin/date -u +%s -d '-1 minute' + local start=1774518336 ++ /usr/sbin/date -u +%s + local end=1774518396 ++ get_service_endpoint monitoring-service ++ local service=monitoring-service +++ kubectl_bin get service/monitoring-service -o json +++ jq '.status.loadBalancer.ingress[].hostname' +++ sed -e 's/^"//; s/"$//;' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.Hfk6WriU5Q ++++ mktemp +++ local LAST_ERR=/tmp/tmp.cnQnP9Labq +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.Hfk6WriU5Q +++ cat /tmp/tmp.cnQnP9Labq +++ rm /tmp/tmp.Hfk6WriU5Q /tmp/tmp.cnQnP9Labq +++ return 0 ++ local hostname=null ++ '[' -n null -a null '!=' null ']' +++ kubectl_bin get service/monitoring-service -o json +++ jq '.status.loadBalancer.ingress[].ip' +++ sed -e 's/^"//; s/"$//;' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.XU0BnQPcz2 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.NlgrtgMEaB +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.XU0BnQPcz2 +++ cat /tmp/tmp.NlgrtgMEaB +++ rm /tmp/tmp.XU0BnQPcz2 /tmp/tmp.NlgrtgMEaB +++ return 0 ++ local ip=34.30.40.245 ++ '[' -n 34.30.40.245 -a 34.30.40.245 '!=' null ']' ++ echo 34.30.40.245 ++ return + local endpoint=34.30.40.245 + curl -s -k 'https://admin:admin@34.30.40.245/graph/api/datasources/proxy/1/api/v1/query_range?query=min%28node_boot_time_seconds%7Bnode_name%3D%7E%22monitoring-2-0-24852-monitoring-mongos-0%22%7d%20or%20node_boot_time_seconds%7Bnode_name%3D%7E%22monitoring-2-0-24852-monitoring-mongos-0%22%7D%29&start=1774518336&end=1774518396&step=60' + jq '.data.result[0].values[][1]' + grep '^"[0-9]' "1774511866" "1774511866" + sleep 90 + desc 'check QAN data' + set +o xtrace ----------------------------------------------------------------------------------- check QAN data ----------------------------------------------------------------------------------- + get_qan_values mongodb dev-mongod admin:admin + local service_type=mongodb + local environment=dev-mongod + local user_pass=admin:admin + local start + local end + local endpoint ++ /usr/sbin/date -u -d '-12 hour' +%Y-%m-%dT%H:%M:%S%:z + start=2026-03-25T21:48:09+00:00 ++ /usr/sbin/date -u +%Y-%m-%dT%H:%M:%S%:z + end=2026-03-26T09:48:09+00:00 ++ get_service_endpoint monitoring-service ++ local service=monitoring-service +++ kubectl_bin get service/monitoring-service -o json ++++ mktemp +++ jq '.status.loadBalancer.ingress[].hostname' +++ sed -e 's/^"//; s/"$//;' +++ local LAST_OUT=/tmp/tmp.qpmqykJvtZ ++++ mktemp +++ local LAST_ERR=/tmp/tmp.NvtHx51v7T +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.qpmqykJvtZ +++ cat /tmp/tmp.NvtHx51v7T +++ rm 
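[Editor's note] The QAN check that follows posts a report request to PMM's QAN API and extracts a sparkline series per query row; payload.json is assembled by the cat step above (its contents are not shown in this excerpt):

    # Fetch a QAN report and pull each row's sparkline (this run's endpoint).
    endpoint=34.30.40.245
    curl -s -k -XPOST -d @payload.json \
        "https://admin:admin@${endpoint}/v0/qan/GetReport" \
        | jq '.rows[].sparkline'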
/tmp/tmp.qpmqykJvtZ /tmp/tmp.NvtHx51v7T +++ return 0 ++ local hostname=null ++ '[' -n null -a null '!=' null ']' +++ kubectl_bin get service/monitoring-service -o json +++ jq '.status.loadBalancer.ingress[].ip' +++ sed -e 's/^"//; s/"$//;' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.R65b15AAAk ++++ mktemp +++ local LAST_ERR=/tmp/tmp.QrQZe6HIbK +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.R65b15AAAk +++ cat /tmp/tmp.QrQZe6HIbK +++ rm /tmp/tmp.R65b15AAAk /tmp/tmp.QrQZe6HIbK +++ return 0 ++ local ip=34.30.40.245 ++ '[' -n 34.30.40.245 -a 34.30.40.245 '!=' null ']' ++ echo 34.30.40.245 ++ return + endpoint=34.30.40.245 + cat + local response + retry=0 ++ curl -s -k -XPOST -d @payload.json https://admin:admin@34.30.40.245/v0/qan/GetReport ++ jq '.rows[].sparkline' + [[ [ { "time_frame": 360, "timestamp": "2026-03-26T09:48:00Z" }, { "point": 1, "time_frame": 360, "timestamp": "2026-03-26T09:42:00Z" }, { "point": 2, "time_frame": 360, "timestamp": "2026-03-26T09:36:00Z" }, { "point": 3, "time_frame": 360, "timestamp": "2026-03-26T09:30:00Z" }, { "point": 4, "time_frame": 360, "timestamp": "2026-03-26T09:24:00Z" }, { "point": 5, "time_frame": 360, "timestamp": "2026-03-26T09:18:00Z" }, { "point": 6, "time_frame": 360, "timestamp": "2026-03-26T09:12:00Z" }, { "point": 7, "time_frame": 360, "timestamp": "2026-03-26T09:06:00Z" }, { "point": 8, "time_frame": 360, "timestamp": "2026-03-26T09:00:00Z" }, { "point": 9, "time_frame": 360, "timestamp": "2026-03-26T08:54:00Z" }, { "point": 10, "time_frame": 360, "timestamp": "2026-03-26T08:48:00Z" }, { "point": 11, "time_frame": 360, "timestamp": "2026-03-26T08:42:00Z" }, { "point": 12, "time_frame": 360, "timestamp": "2026-03-26T08:36:00Z" }, { "point": 13, "time_frame": 360, "timestamp": "2026-03-26T08:30:00Z" }, { "point": 14, "time_frame": 360, "timestamp": "2026-03-26T08:24:00Z" }, { "point": 15, "time_frame": 360, "timestamp": "2026-03-26T08:18:00Z" }, { "point": 16, "time_frame": 360, "timestamp": "2026-03-26T08:12:00Z" }, { "point": 17, "time_frame": 360, "timestamp": "2026-03-26T08:06:00Z" }, { "point": 18, "time_frame": 360, "timestamp": "2026-03-26T08:00:00Z" }, { "point": 19, "time_frame": 360, "timestamp": "2026-03-26T07:54:00Z" }, { "point": 20, "time_frame": 360, "timestamp": "2026-03-26T07:48:00Z" }, { "point": 21, "time_frame": 360, "timestamp": "2026-03-26T07:42:00Z" }, { "point": 22, "time_frame": 360, "timestamp": "2026-03-26T07:36:00Z" }, { "point": 23, "time_frame": 360, "timestamp": "2026-03-26T07:30:00Z" }, { "point": 24, "time_frame": 360, "timestamp": "2026-03-26T07:24:00Z" }, { "point": 25, "time_frame": 360, "timestamp": "2026-03-26T07:18:00Z" }, { "point": 26, "time_frame": 360, "timestamp": "2026-03-26T07:12:00Z" }, { "point": 27, "time_frame": 360, "timestamp": "2026-03-26T07:06:00Z" }, { "point": 28, "time_frame": 360, "timestamp": "2026-03-26T07:00:00Z" }, { "point": 29, "time_frame": 360, "timestamp": "2026-03-26T06:54:00Z" }, { "point": 30, "time_frame": 360, "timestamp": "2026-03-26T06:48:00Z" }, { "point": 31, "time_frame": 360, "timestamp": "2026-03-26T06:42:00Z" }, { "point": 32, "time_frame": 360, "timestamp": "2026-03-26T06:36:00Z" }, { "point": 33, "time_frame": 360, "timestamp": "2026-03-26T06:30:00Z" }, { "point": 34, "time_frame": 360, "timestamp": "2026-03-26T06:24:00Z" }, { "point": 35, "time_frame": 360, "timestamp": 
"2026-03-26T06:18:00Z" }, { "point": 36, "time_frame": 360, "timestamp": "2026-03-26T06:12:00Z" }, { "point": 37, "time_frame": 360, "timestamp": "2026-03-26T06:06:00Z" }, { "point": 38, "time_frame": 360, "timestamp": "2026-03-26T06:00:00Z" }, { "point": 39, "time_frame": 360, "timestamp": "2026-03-26T05:54:00Z" }, { "point": 40, "time_frame": 360, "timestamp": "2026-03-26T05:48:00Z" }, { "point": 41, "time_frame": 360, "timestamp": "2026-03-26T05:42:00Z" }, { "point": 42, "time_frame": 360, "timestamp": "2026-03-26T05:36:00Z" }, { "point": 43, "time_frame": 360, "timestamp": "2026-03-26T05:30:00Z" }, { "point": 44, "time_frame": 360, "timestamp": "2026-03-26T05:24:00Z" }, { "point": 45, "time_frame": 360, "timestamp": "2026-03-26T05:18:00Z" }, { "point": 46, "time_frame": 360, "timestamp": "2026-03-26T05:12:00Z" }, { "point": 47, "time_frame": 360, "timestamp": "2026-03-26T05:06:00Z" }, { "point": 48, "time_frame": 360, "timestamp": "2026-03-26T05:00:00Z" }, { "point": 49, "time_frame": 360, "timestamp": "2026-03-26T04:54:00Z" }, { "point": 50, "time_frame": 360, "timestamp": "2026-03-26T04:48:00Z" }, { "point": 51, "time_frame": 360, "timestamp": "2026-03-26T04:42:00Z" }, { "point": 52, "time_frame": 360, "timestamp": "2026-03-26T04:36:00Z" }, { "point": 53, "time_frame": 360, "timestamp": "2026-03-26T04:30:00Z" }, { "point": 54, "time_frame": 360, "timestamp": "2026-03-26T04:24:00Z" }, { "point": 55, "time_frame": 360, "timestamp": "2026-03-26T04:18:00Z" }, { "point": 56, "time_frame": 360, "timestamp": "2026-03-26T04:12:00Z" }, { "point": 57, "time_frame": 360, "timestamp": "2026-03-26T04:06:00Z" }, { "point": 58, "time_frame": 360, "timestamp": "2026-03-26T04:00:00Z" }, { "point": 59, "time_frame": 360, "timestamp": "2026-03-26T03:54:00Z" }, { "point": 60, "time_frame": 360, "timestamp": "2026-03-26T03:48:00Z" }, { "point": 61, "time_frame": 360, "timestamp": "2026-03-26T03:42:00Z" }, { "point": 62, "time_frame": 360, "timestamp": "2026-03-26T03:36:00Z" }, { "point": 63, "time_frame": 360, "timestamp": "2026-03-26T03:30:00Z" }, { "point": 64, "time_frame": 360, "timestamp": "2026-03-26T03:24:00Z" }, { "point": 65, "time_frame": 360, "timestamp": "2026-03-26T03:18:00Z" }, { "point": 66, "time_frame": 360, "timestamp": "2026-03-26T03:12:00Z" }, { "point": 67, "time_frame": 360, "timestamp": "2026-03-26T03:06:00Z" }, { "point": 68, "time_frame": 360, "timestamp": "2026-03-26T03:00:00Z" }, { "point": 69, "time_frame": 360, "timestamp": "2026-03-26T02:54:00Z" }, { "point": 70, "time_frame": 360, "timestamp": "2026-03-26T02:48:00Z" }, { "point": 71, "time_frame": 360, "timestamp": "2026-03-26T02:42:00Z" }, { "point": 72, "time_frame": 360, "timestamp": "2026-03-26T02:36:00Z" }, { "point": 73, "time_frame": 360, "timestamp": "2026-03-26T02:30:00Z" }, { "point": 74, "time_frame": 360, "timestamp": "2026-03-26T02:24:00Z" }, { "point": 75, "time_frame": 360, "timestamp": "2026-03-26T02:18:00Z" }, { "point": 76, "time_frame": 360, "timestamp": "2026-03-26T02:12:00Z" }, { "point": 77, "time_frame": 360, "timestamp": "2026-03-26T02:06:00Z" }, { "point": 78, "time_frame": 360, "timestamp": "2026-03-26T02:00:00Z" }, { "point": 79, "time_frame": 360, "timestamp": "2026-03-26T01:54:00Z" }, { "point": 80, "time_frame": 360, "timestamp": "2026-03-26T01:48:00Z" }, { "point": 81, "time_frame": 360, "timestamp": "2026-03-26T01:42:00Z" }, { "point": 82, "time_frame": 360, "timestamp": "2026-03-26T01:36:00Z" }, { "point": 83, "time_frame": 360, "timestamp": "2026-03-26T01:30:00Z" }, { "point": 84, 
"time_frame": 360, "timestamp": "2026-03-26T01:24:00Z" }, { "point": 85, "time_frame": 360, "timestamp": "2026-03-26T01:18:00Z" }, { "point": 86, "time_frame": 360, "timestamp": "2026-03-26T01:12:00Z" }, { "point": 87, "time_frame": 360, "timestamp": "2026-03-26T01:06:00Z" }, { "point": 88, "time_frame": 360, "timestamp": "2026-03-26T01:00:00Z" }, { "point": 89, "time_frame": 360, "timestamp": "2026-03-26T00:54:00Z" }, { "point": 90, "time_frame": 360, "timestamp": "2026-03-26T00:48:00Z" }, { "point": 91, "time_frame": 360, "timestamp": "2026-03-26T00:42:00Z" }, { "point": 92, "time_frame": 360, "timestamp": "2026-03-26T00:36:00Z" }, { "point": 93, "time_frame": 360, "timestamp": "2026-03-26T00:30:00Z" }, { "point": 94, "time_frame": 360, "timestamp": "2026-03-26T00:24:00Z" }, { "point": 95, "time_frame": 360, "timestamp": "2026-03-26T00:18:00Z" }, { "point": 96, "time_frame": 360, "timestamp": "2026-03-26T00:12:00Z" }, { "point": 97, "time_frame": 360, "timestamp": "2026-03-26T00:06:00Z" }, { "point": 98, "time_frame": 360, "timestamp": "2026-03-26T00:00:00Z" }, { "point": 99, "time_frame": 360, "timestamp": "2026-03-25T23:54:00Z" }, { "point": 100, "time_frame": 360, "timestamp": "2026-03-25T23:48:00Z" }, { "point": 101, "time_frame": 360, "timestamp": "2026-03-25T23:42:00Z" }, { "point": 102, "time_frame": 360, "timestamp": "2026-03-25T23:36:00Z" }, { "point": 103, "time_frame": 360, "timestamp": "2026-03-25T23:30:00Z" }, { "point": 104, "time_frame": 360, "timestamp": "2026-03-25T23:24:00Z" }, { "point": 105, "time_frame": 360, "timestamp": "2026-03-25T23:18:00Z" }, { "point": 106, "time_frame": 360, "timestamp": "2026-03-25T23:12:00Z" }, { "point": 107, "time_frame": 360, "timestamp": "2026-03-25T23:06:00Z" }, { "point": 108, "time_frame": 360, "timestamp": "2026-03-25T23:00:00Z" }, { "point": 109, "time_frame": 360, "timestamp": "2026-03-25T22:54:00Z" }, { "point": 110, "time_frame": 360, "timestamp": "2026-03-25T22:48:00Z" }, { "point": 111, "time_frame": 360, "timestamp": "2026-03-25T22:42:00Z" }, { "point": 112, "time_frame": 360, "timestamp": "2026-03-25T22:36:00Z" }, { "point": 113, "time_frame": 360, "timestamp": "2026-03-25T22:30:00Z" }, { "point": 114, "time_frame": 360, "timestamp": "2026-03-25T22:24:00Z" }, { "point": 115, "time_frame": 360, "timestamp": "2026-03-25T22:18:00Z" }, { "point": 116, "time_frame": 360, "timestamp": "2026-03-25T22:12:00Z" }, { "point": 117, "time_frame": 360, "timestamp": "2026-03-25T22:06:00Z" }, { "point": 118, "time_frame": 360, "timestamp": "2026-03-25T22:00:00Z" }, { "point": 119, "time_frame": 360, "timestamp": "2026-03-25T21:54:00Z" } ] [ { "time_frame": 360, "timestamp": "2026-03-26T09:48:00Z" }, { "point": 1, "time_frame": 360, "timestamp": "2026-03-26T09:42:00Z" }, { "point": 2, "time_frame": 360, "timestamp": "2026-03-26T09:36:00Z" }, { "point": 3, "time_frame": 360, "timestamp": "2026-03-26T09:30:00Z" }, { "point": 4, "time_frame": 360, "timestamp": "2026-03-26T09:24:00Z" }, { "point": 5, "time_frame": 360, "timestamp": "2026-03-26T09:18:00Z" }, { "point": 6, "time_frame": 360, "timestamp": "2026-03-26T09:12:00Z" }, { "point": 7, "time_frame": 360, "timestamp": "2026-03-26T09:06:00Z" }, { "point": 8, "time_frame": 360, "timestamp": "2026-03-26T09:00:00Z" }, { "point": 9, "time_frame": 360, "timestamp": "2026-03-26T08:54:00Z" }, { "point": 10, "time_frame": 360, "timestamp": "2026-03-26T08:48:00Z" }, { "point": 11, "time_frame": 360, "timestamp": "2026-03-26T08:42:00Z" }, { "point": 12, "time_frame": 360, "timestamp": 
"2026-03-26T08:36:00Z" }, { "point": 13, "time_frame": 360, "timestamp": "2026-03-26T08:30:00Z" }, { "point": 14, "time_frame": 360, "timestamp": "2026-03-26T08:24:00Z" }, { "point": 15, "time_frame": 360, "timestamp": "2026-03-26T08:18:00Z" }, { "point": 16, "time_frame": 360, "timestamp": "2026-03-26T08:12:00Z" }, { "point": 17, "time_frame": 360, "timestamp": "2026-03-26T08:06:00Z" }, { "point": 18, "time_frame": 360, "timestamp": "2026-03-26T08:00:00Z" }, { "point": 19, "time_frame": 360, "timestamp": "2026-03-26T07:54:00Z" }, { "point": 20, "time_frame": 360, "timestamp": "2026-03-26T07:48:00Z" }, { "point": 21, "time_frame": 360, "timestamp": "2026-03-26T07:42:00Z" }, { "point": 22, "time_frame": 360, "timestamp": "2026-03-26T07:36:00Z" }, { "point": 23, "time_frame": 360, "timestamp": "2026-03-26T07:30:00Z" }, { "point": 24, "time_frame": 360, "timestamp": "2026-03-26T07:24:00Z" }, { "point": 25, "time_frame": 360, "timestamp": "2026-03-26T07:18:00Z" }, { "point": 26, "time_frame": 360, "timestamp": "2026-03-26T07:12:00Z" }, { "point": 27, "time_frame": 360, "timestamp": "2026-03-26T07:06:00Z" }, { "point": 28, "time_frame": 360, "timestamp": "2026-03-26T07:00:00Z" }, { "point": 29, "time_frame": 360, "timestamp": "2026-03-26T06:54:00Z" }, { "point": 30, "time_frame": 360, "timestamp": "2026-03-26T06:48:00Z" }, { "point": 31, "time_frame": 360, "timestamp": "2026-03-26T06:42:00Z" }, { "point": 32, "time_frame": 360, "timestamp": "2026-03-26T06:36:00Z" }, { "point": 33, "time_frame": 360, "timestamp": "2026-03-26T06:30:00Z" }, { "point": 34, "time_frame": 360, "timestamp": "2026-03-26T06:24:00Z" }, { "point": 35, "time_frame": 360, "timestamp": "2026-03-26T06:18:00Z" }, { "point": 36, "time_frame": 360, "timestamp": "2026-03-26T06:12:00Z" }, { "point": 37, "time_frame": 360, "timestamp": "2026-03-26T06:06:00Z" }, { "point": 38, "time_frame": 360, "timestamp": "2026-03-26T06:00:00Z" }, { "point": 39, "time_frame": 360, "timestamp": "2026-03-26T05:54:00Z" }, { "point": 40, "time_frame": 360, "timestamp": "2026-03-26T05:48:00Z" }, { "point": 41, "time_frame": 360, "timestamp": "2026-03-26T05:42:00Z" }, { "point": 42, "time_frame": 360, "timestamp": "2026-03-26T05:36:00Z" }, { "point": 43, "time_frame": 360, "timestamp": "2026-03-26T05:30:00Z" }, { "point": 44, "time_frame": 360, "timestamp": "2026-03-26T05:24:00Z" }, { "point": 45, "time_frame": 360, "timestamp": "2026-03-26T05:18:00Z" }, { "point": 46, "time_frame": 360, "timestamp": "2026-03-26T05:12:00Z" }, { "point": 47, "time_frame": 360, "timestamp": "2026-03-26T05:06:00Z" }, { "point": 48, "time_frame": 360, "timestamp": "2026-03-26T05:00:00Z" }, { "point": 49, "time_frame": 360, "timestamp": "2026-03-26T04:54:00Z" }, { "point": 50, "time_frame": 360, "timestamp": "2026-03-26T04:48:00Z" }, { "point": 51, "time_frame": 360, "timestamp": "2026-03-26T04:42:00Z" }, { "point": 52, "time_frame": 360, "timestamp": "2026-03-26T04:36:00Z" }, { "point": 53, "time_frame": 360, "timestamp": "2026-03-26T04:30:00Z" }, { "point": 54, "time_frame": 360, "timestamp": "2026-03-26T04:24:00Z" }, { "point": 55, "time_frame": 360, "timestamp": "2026-03-26T04:18:00Z" }, { "point": 56, "time_frame": 360, "timestamp": "2026-03-26T04:12:00Z" }, { "point": 57, "time_frame": 360, "timestamp": "2026-03-26T04:06:00Z" }, { "point": 58, "time_frame": 360, "timestamp": "2026-03-26T04:00:00Z" }, { "point": 59, "time_frame": 360, "timestamp": "2026-03-26T03:54:00Z" }, { "point": 60, "time_frame": 360, "timestamp": "2026-03-26T03:48:00Z" }, { "point": 61, 
"time_frame": 360, "timestamp": "2026-03-26T03:42:00Z" }, { "point": 62, "time_frame": 360, "timestamp": "2026-03-26T03:36:00Z" }, { "point": 63, "time_frame": 360, "timestamp": "2026-03-26T03:30:00Z" }, { "point": 64, "time_frame": 360, "timestamp": "2026-03-26T03:24:00Z" }, { "point": 65, "time_frame": 360, "timestamp": "2026-03-26T03:18:00Z" }, { "point": 66, "time_frame": 360, "timestamp": "2026-03-26T03:12:00Z" }, { "point": 67, "time_frame": 360, "timestamp": "2026-03-26T03:06:00Z" }, { "point": 68, "time_frame": 360, "timestamp": "2026-03-26T03:00:00Z" }, { "point": 69, "time_frame": 360, "timestamp": "2026-03-26T02:54:00Z" }, { "point": 70, "time_frame": 360, "timestamp": "2026-03-26T02:48:00Z" }, { "point": 71, "time_frame": 360, "timestamp": "2026-03-26T02:42:00Z" }, { "point": 72, "time_frame": 360, "timestamp": "2026-03-26T02:36:00Z" }, { "point": 73, "time_frame": 360, "timestamp": "2026-03-26T02:30:00Z" }, { "point": 74, "time_frame": 360, "timestamp": "2026-03-26T02:24:00Z" }, { "point": 75, "time_frame": 360, "timestamp": "2026-03-26T02:18:00Z" }, { "point": 76, "time_frame": 360, "timestamp": "2026-03-26T02:12:00Z" }, { "point": 77, "time_frame": 360, "timestamp": "2026-03-26T02:06:00Z" }, { "point": 78, "time_frame": 360, "timestamp": "2026-03-26T02:00:00Z" }, { "point": 79, "time_frame": 360, "timestamp": "2026-03-26T01:54:00Z" }, { "point": 80, "time_frame": 360, "timestamp": "2026-03-26T01:48:00Z" }, { "point": 81, "time_frame": 360, "timestamp": "2026-03-26T01:42:00Z" }, { "point": 82, "time_frame": 360, "timestamp": "2026-03-26T01:36:00Z" }, { "point": 83, "time_frame": 360, "timestamp": "2026-03-26T01:30:00Z" }, { "point": 84, "time_frame": 360, "timestamp": "2026-03-26T01:24:00Z" }, { "point": 85, "time_frame": 360, "timestamp": "2026-03-26T01:18:00Z" }, { "point": 86, "time_frame": 360, "timestamp": "2026-03-26T01:12:00Z" }, { "point": 87, "time_frame": 360, "timestamp": "2026-03-26T01:06:00Z" }, { "point": 88, "time_frame": 360, "timestamp": "2026-03-26T01:00:00Z" }, { "point": 89, "time_frame": 360, "timestamp": "2026-03-26T00:54:00Z" }, { "point": 90, "time_frame": 360, "timestamp": "2026-03-26T00:48:00Z" }, { "point": 91, "time_frame": 360, "timestamp": "2026-03-26T00:42:00Z" }, { "point": 92, "time_frame": 360, "timestamp": "2026-03-26T00:36:00Z" }, { "point": 93, "time_frame": 360, "timestamp": "2026-03-26T00:30:00Z" }, { "point": 94, "time_frame": 360, "timestamp": "2026-03-26T00:24:00Z" }, { "point": 95, "time_frame": 360, "timestamp": "2026-03-26T00:18:00Z" }, { "point": 96, "time_frame": 360, "timestamp": "2026-03-26T00:12:00Z" }, { "point": 97, "time_frame": 360, "timestamp": "2026-03-26T00:06:00Z" }, { "point": 98, "time_frame": 360, "timestamp": "2026-03-26T00:00:00Z" }, { "point": 99, "time_frame": 360, "timestamp": "2026-03-25T23:54:00Z" }, { "point": 100, "time_frame": 360, "timestamp": "2026-03-25T23:48:00Z" }, { "point": 101, "time_frame": 360, "timestamp": "2026-03-25T23:42:00Z" }, { "point": 102, "time_frame": 360, "timestamp": "2026-03-25T23:36:00Z" }, { "point": 103, "time_frame": 360, "timestamp": "2026-03-25T23:30:00Z" }, { "point": 104, "time_frame": 360, "timestamp": "2026-03-25T23:24:00Z" }, { "point": 105, "time_frame": 360, "timestamp": "2026-03-25T23:18:00Z" }, { "point": 106, "time_frame": 360, "timestamp": "2026-03-25T23:12:00Z" }, { "point": 107, "time_frame": 360, "timestamp": "2026-03-25T23:06:00Z" }, { "point": 108, "time_frame": 360, "timestamp": "2026-03-25T23:00:00Z" }, { "point": 109, "time_frame": 360, "timestamp": 
"2026-03-25T22:54:00Z" }, { "point": 110, "time_frame": 360, "timestamp": "2026-03-25T22:48:00Z" }, { "point": 111, "time_frame": 360, "timestamp": "2026-03-25T22:42:00Z" }, { "point": 112, "time_frame": 360, "timestamp": "2026-03-25T22:36:00Z" }, { "point": 113, "time_frame": 360, "timestamp": "2026-03-25T22:30:00Z" }, { "point": 114, "time_frame": 360, "timestamp": "2026-03-25T22:24:00Z" }, { "point": 115, "time_frame": 360, "timestamp": "2026-03-25T22:18:00Z" }, { "point": 116, "time_frame": 360, "timestamp": "2026-03-25T22:12:00Z" }, { "point": 117, "time_frame": 360, "timestamp": "2026-03-25T22:06:00Z" }, { "point": 118, "time_frame": 360, "timestamp": "2026-03-25T22:00:00Z" }, { "point": 119, "time_frame": 360, "timestamp": "2026-03-25T21:54:00Z" } ] [ { "time_frame": 360, "timestamp": "2026-03-26T09:48:00Z" }, { "point": 1, "time_frame": 360, "timestamp": "2026-03-26T09:42:00Z" }, { "point": 2, "time_frame": 360, "timestamp": "2026-03-26T09:36:00Z" }, { "point": 3, "time_frame": 360, "timestamp": "2026-03-26T09:30:00Z" }, { "point": 4, "time_frame": 360, "timestamp": "2026-03-26T09:24:00Z" }, { "point": 5, "time_frame": 360, "timestamp": "2026-03-26T09:18:00Z" }, { "point": 6, "time_frame": 360, "timestamp": "2026-03-26T09:12:00Z" }, { "point": 7, "time_frame": 360, "timestamp": "2026-03-26T09:06:00Z" }, { "point": 8, "time_frame": 360, "timestamp": "2026-03-26T09:00:00Z" }, { "point": 9, "time_frame": 360, "timestamp": "2026-03-26T08:54:00Z" }, { "point": 10, "time_frame": 360, "timestamp": "2026-03-26T08:48:00Z" }, { "point": 11, "time_frame": 360, "timestamp": "2026-03-26T08:42:00Z" }, { "point": 12, "time_frame": 360, "timestamp": "2026-03-26T08:36:00Z" }, { "point": 13, "time_frame": 360, "timestamp": "2026-03-26T08:30:00Z" }, { "point": 14, "time_frame": 360, "timestamp": "2026-03-26T08:24:00Z" }, { "point": 15, "time_frame": 360, "timestamp": "2026-03-26T08:18:00Z" }, { "point": 16, "time_frame": 360, "timestamp": "2026-03-26T08:12:00Z" }, { "point": 17, "time_frame": 360, "timestamp": "2026-03-26T08:06:00Z" }, { "point": 18, "time_frame": 360, "timestamp": "2026-03-26T08:00:00Z" }, { "point": 19, "time_frame": 360, "timestamp": "2026-03-26T07:54:00Z" }, { "point": 20, "time_frame": 360, "timestamp": "2026-03-26T07:48:00Z" }, { "point": 21, "time_frame": 360, "timestamp": "2026-03-26T07:42:00Z" }, { "point": 22, "time_frame": 360, "timestamp": "2026-03-26T07:36:00Z" }, { "point": 23, "time_frame": 360, "timestamp": "2026-03-26T07:30:00Z" }, { "point": 24, "time_frame": 360, "timestamp": "2026-03-26T07:24:00Z" }, { "point": 25, "time_frame": 360, "timestamp": "2026-03-26T07:18:00Z" }, { "point": 26, "time_frame": 360, "timestamp": "2026-03-26T07:12:00Z" }, { "point": 27, "time_frame": 360, "timestamp": "2026-03-26T07:06:00Z" }, { "point": 28, "time_frame": 360, "timestamp": "2026-03-26T07:00:00Z" }, { "point": 29, "time_frame": 360, "timestamp": "2026-03-26T06:54:00Z" }, { "point": 30, "time_frame": 360, "timestamp": "2026-03-26T06:48:00Z" }, { "point": 31, "time_frame": 360, "timestamp": "2026-03-26T06:42:00Z" }, { "point": 32, "time_frame": 360, "timestamp": "2026-03-26T06:36:00Z" }, { "point": 33, "time_frame": 360, "timestamp": "2026-03-26T06:30:00Z" }, { "point": 34, "time_frame": 360, "timestamp": "2026-03-26T06:24:00Z" }, { "point": 35, "time_frame": 360, "timestamp": "2026-03-26T06:18:00Z" }, { "point": 36, "time_frame": 360, "timestamp": "2026-03-26T06:12:00Z" }, { "point": 37, "time_frame": 360, "timestamp": "2026-03-26T06:06:00Z" }, { "point": 38, "time_frame": 360, 
"timestamp": "2026-03-26T06:00:00Z" }, { "point": 39, "time_frame": 360, "timestamp": "2026-03-26T05:54:00Z" }, { "point": 40, "time_frame": 360, "timestamp": "2026-03-26T05:48:00Z" }, { "point": 41, "time_frame": 360, "timestamp": "2026-03-26T05:42:00Z" }, { "point": 42, "time_frame": 360, "timestamp": "2026-03-26T05:36:00Z" }, { "point": 43, "time_frame": 360, "timestamp": "2026-03-26T05:30:00Z" }, { "point": 44, "time_frame": 360, "timestamp": "2026-03-26T05:24:00Z" }, { "point": 45, "time_frame": 360, "timestamp": "2026-03-26T05:18:00Z" }, { "point": 46, "time_frame": 360, "timestamp": "2026-03-26T05:12:00Z" }, { "point": 47, "time_frame": 360, "timestamp": "2026-03-26T05:06:00Z" }, { "point": 48, "time_frame": 360, "timestamp": "2026-03-26T05:00:00Z" }, { "point": 49, "time_frame": 360, "timestamp": "2026-03-26T04:54:00Z" }, { "point": 50, "time_frame": 360, "timestamp": "2026-03-26T04:48:00Z" }, { "point": 51, "time_frame": 360, "timestamp": "2026-03-26T04:42:00Z" }, { "point": 52, "time_frame": 360, "timestamp": "2026-03-26T04:36:00Z" }, { "point": 53, "time_frame": 360, "timestamp": "2026-03-26T04:30:00Z" }, { "point": 54, "time_frame": 360, "timestamp": "2026-03-26T04:24:00Z" }, { "point": 55, "time_frame": 360, "timestamp": "2026-03-26T04:18:00Z" }, { "point": 56, "time_frame": 360, "timestamp": "2026-03-26T04:12:00Z" }, { "point": 57, "time_frame": 360, "timestamp": "2026-03-26T04:06:00Z" }, { "point": 58, "time_frame": 360, "timestamp": "2026-03-26T04:00:00Z" }, { "point": 59, "time_frame": 360, "timestamp": "2026-03-26T03:54:00Z" }, { "point": 60, "time_frame": 360, "timestamp": "2026-03-26T03:48:00Z" }, { "point": 61, "time_frame": 360, "timestamp": "2026-03-26T03:42:00Z" }, { "point": 62, "time_frame": 360, "timestamp": "2026-03-26T03:36:00Z" }, { "point": 63, "time_frame": 360, "timestamp": "2026-03-26T03:30:00Z" }, { "point": 64, "time_frame": 360, "timestamp": "2026-03-26T03:24:00Z" }, { "point": 65, "time_frame": 360, "timestamp": "2026-03-26T03:18:00Z" }, { "point": 66, "time_frame": 360, "timestamp": "2026-03-26T03:12:00Z" }, { "point": 67, "time_frame": 360, "timestamp": "2026-03-26T03:06:00Z" }, { "point": 68, "time_frame": 360, "timestamp": "2026-03-26T03:00:00Z" }, { "point": 69, "time_frame": 360, "timestamp": "2026-03-26T02:54:00Z" }, { "point": 70, "time_frame": 360, "timestamp": "2026-03-26T02:48:00Z" }, { "point": 71, "time_frame": 360, "timestamp": "2026-03-26T02:42:00Z" }, { "point": 72, "time_frame": 360, "timestamp": "2026-03-26T02:36:00Z" }, { "point": 73, "time_frame": 360, "timestamp": "2026-03-26T02:30:00Z" }, { "point": 74, "time_frame": 360, "timestamp": "2026-03-26T02:24:00Z" }, { "point": 75, "time_frame": 360, "timestamp": "2026-03-26T02:18:00Z" }, { "point": 76, "time_frame": 360, "timestamp": "2026-03-26T02:12:00Z" }, { "point": 77, "time_frame": 360, "timestamp": "2026-03-26T02:06:00Z" }, { "point": 78, "time_frame": 360, "timestamp": "2026-03-26T02:00:00Z" }, { "point": 79, "time_frame": 360, "timestamp": "2026-03-26T01:54:00Z" }, { "point": 80, "time_frame": 360, "timestamp": "2026-03-26T01:48:00Z" }, { "point": 81, "time_frame": 360, "timestamp": "2026-03-26T01:42:00Z" }, { "point": 82, "time_frame": 360, "timestamp": "2026-03-26T01:36:00Z" }, { "point": 83, "time_frame": 360, "timestamp": "2026-03-26T01:30:00Z" }, { "point": 84, "time_frame": 360, "timestamp": "2026-03-26T01:24:00Z" }, { "point": 85, "time_frame": 360, "timestamp": "2026-03-26T01:18:00Z" }, { "point": 86, "time_frame": 360, "timestamp": "2026-03-26T01:12:00Z" }, { "point": 
87, "time_frame": 360, "timestamp": "2026-03-26T01:06:00Z" }, { "point": 88, "time_frame": 360, "timestamp": "2026-03-26T01:00:00Z" }, { "point": 89, "time_frame": 360, "timestamp": "2026-03-26T00:54:00Z" }, { "point": 90, "time_frame": 360, "timestamp": "2026-03-26T00:48:00Z" }, { "point": 91, "time_frame": 360, "timestamp": "2026-03-26T00:42:00Z" }, { "point": 92, "time_frame": 360, "timestamp": "2026-03-26T00:36:00Z" }, { "point": 93, "time_frame": 360, "timestamp": "2026-03-26T00:30:00Z" }, { "point": 94, "time_frame": 360, "timestamp": "2026-03-26T00:24:00Z" }, { "point": 95, "time_frame": 360, "timestamp": "2026-03-26T00:18:00Z" }, { "point": 96, "time_frame": 360, "timestamp": "2026-03-26T00:12:00Z" }, { "point": 97, "time_frame": 360, "timestamp": "2026-03-26T00:06:00Z" }, { "point": 98, "time_frame": 360, "timestamp": "2026-03-26T00:00:00Z" }, { "point": 99, "time_frame": 360, "timestamp": "2026-03-25T23:54:00Z" }, { "point": 100, "time_frame": 360, "timestamp": "2026-03-25T23:48:00Z" }, { "point": 101, "time_frame": 360, "timestamp": "2026-03-25T23:42:00Z" }, { "point": 102, "time_frame": 360, "timestamp": "2026-03-25T23:36:00Z" }, { "point": 103, "time_frame": 360, "timestamp": "2026-03-25T23:30:00Z" }, { "point": 104, "time_frame": 360, "timestamp": "2026-03-25T23:24:00Z" }, { "point": 105, "time_frame": 360, "timestamp": "2026-03-25T23:18:00Z" }, { "point": 106, "time_frame": 360, "timestamp": "2026-03-25T23:12:00Z" }, { "point": 107, "time_frame": 360, "timestamp": "2026-03-25T23:06:00Z" }, { "point": 108, "time_frame": 360, "timestamp": "2026-03-25T23:00:00Z" }, { "point": 109, "time_frame": 360, "timestamp": "2026-03-25T22:54:00Z" }, { "point": 110, "time_frame": 360, "timestamp": "2026-03-25T22:48:00Z" }, { "point": 111, "time_frame": 360, "timestamp": "2026-03-25T22:42:00Z" }, { "point": 112, "time_frame": 360, "timestamp": "2026-03-25T22:36:00Z" }, { "point": 113, "time_frame": 360, "timestamp": "2026-03-25T22:30:00Z" }, { "point": 114, "time_frame": 360, "timestamp": "2026-03-25T22:24:00Z" }, { "point": 115, "time_frame": 360, "timestamp": "2026-03-25T22:18:00Z" }, { "point": 116, "time_frame": 360, "timestamp": "2026-03-25T22:12:00Z" }, { "point": 117, "time_frame": 360, "timestamp": "2026-03-25T22:06:00Z" }, { "point": 118, "time_frame": 360, "timestamp": "2026-03-25T22:00:00Z" }, { "point": 119, "time_frame": 360, "timestamp": "2026-03-25T21:54:00Z" } ] [ { "time_frame": 360, "timestamp": "2026-03-26T09:48:00Z" }, { "point": 1, "time_frame": 360, "timestamp": "2026-03-26T09:42:00Z" }, { "point": 2, "time_frame": 360, "timestamp": "2026-03-26T09:36:00Z" }, { "point": 3, "time_frame": 360, "timestamp": "2026-03-26T09:30:00Z" }, { "point": 4, "time_frame": 360, "timestamp": "2026-03-26T09:24:00Z" }, { "point": 5, "time_frame": 360, "timestamp": "2026-03-26T09:18:00Z" }, { "point": 6, "time_frame": 360, "timestamp": "2026-03-26T09:12:00Z" }, { "point": 7, "time_frame": 360, "timestamp": "2026-03-26T09:06:00Z" }, { "point": 8, "time_frame": 360, "timestamp": "2026-03-26T09:00:00Z" }, { "point": 9, "time_frame": 360, "timestamp": "2026-03-26T08:54:00Z" }, { "point": 10, "time_frame": 360, "timestamp": "2026-03-26T08:48:00Z" }, { "point": 11, "time_frame": 360, "timestamp": "2026-03-26T08:42:00Z" }, { "point": 12, "time_frame": 360, "timestamp": "2026-03-26T08:36:00Z" }, { "point": 13, "time_frame": 360, "timestamp": "2026-03-26T08:30:00Z" }, { "point": 14, "time_frame": 360, "timestamp": "2026-03-26T08:24:00Z" }, { "point": 15, "time_frame": 360, "timestamp": 
"2026-03-26T08:18:00Z" }, { "point": 16, "time_frame": 360, "timestamp": "2026-03-26T08:12:00Z" }, { "point": 17, "time_frame": 360, "timestamp": "2026-03-26T08:06:00Z" }, { "point": 18, "time_frame": 360, "timestamp": "2026-03-26T08:00:00Z" }, { "point": 19, "time_frame": 360, "timestamp": "2026-03-26T07:54:00Z" }, { "point": 20, "time_frame": 360, "timestamp": "2026-03-26T07:48:00Z" }, { "point": 21, "time_frame": 360, "timestamp": "2026-03-26T07:42:00Z" }, { "point": 22, "time_frame": 360, "timestamp": "2026-03-26T07:36:00Z" }, { "point": 23, "time_frame": 360, "timestamp": "2026-03-26T07:30:00Z" }, { "point": 24, "time_frame": 360, "timestamp": "2026-03-26T07:24:00Z" }, { "point": 25, "time_frame": 360, "timestamp": "2026-03-26T07:18:00Z" }, { "point": 26, "time_frame": 360, "timestamp": "2026-03-26T07:12:00Z" }, { "point": 27, "time_frame": 360, "timestamp": "2026-03-26T07:06:00Z" }, { "point": 28, "time_frame": 360, "timestamp": "2026-03-26T07:00:00Z" }, { "point": 29, "time_frame": 360, "timestamp": "2026-03-26T06:54:00Z" }, { "point": 30, "time_frame": 360, "timestamp": "2026-03-26T06:48:00Z" }, { "point": 31, "time_frame": 360, "timestamp": "2026-03-26T06:42:00Z" }, { "point": 32, "time_frame": 360, "timestamp": "2026-03-26T06:36:00Z" }, { "point": 33, "time_frame": 360, "timestamp": "2026-03-26T06:30:00Z" }, { "point": 34, "time_frame": 360, "timestamp": "2026-03-26T06:24:00Z" }, { "point": 35, "time_frame": 360, "timestamp": "2026-03-26T06:18:00Z" }, { "point": 36, "time_frame": 360, "timestamp": "2026-03-26T06:12:00Z" }, { "point": 37, "time_frame": 360, "timestamp": "2026-03-26T06:06:00Z" }, { "point": 38, "time_frame": 360, "timestamp": "2026-03-26T06:00:00Z" }, { "point": 39, "time_frame": 360, "timestamp": "2026-03-26T05:54:00Z" }, { "point": 40, "time_frame": 360, "timestamp": "2026-03-26T05:48:00Z" }, { "point": 41, "time_frame": 360, "timestamp": "2026-03-26T05:42:00Z" }, { "point": 42, "time_frame": 360, "timestamp": "2026-03-26T05:36:00Z" }, { "point": 43, "time_frame": 360, "timestamp": "2026-03-26T05:30:00Z" }, { "point": 44, "time_frame": 360, "timestamp": "2026-03-26T05:24:00Z" }, { "point": 45, "time_frame": 360, "timestamp": "2026-03-26T05:18:00Z" }, { "point": 46, "time_frame": 360, "timestamp": "2026-03-26T05:12:00Z" }, { "point": 47, "time_frame": 360, "timestamp": "2026-03-26T05:06:00Z" }, { "point": 48, "time_frame": 360, "timestamp": "2026-03-26T05:00:00Z" }, { "point": 49, "time_frame": 360, "timestamp": "2026-03-26T04:54:00Z" }, { "point": 50, "time_frame": 360, "timestamp": "2026-03-26T04:48:00Z" }, { "point": 51, "time_frame": 360, "timestamp": "2026-03-26T04:42:00Z" }, { "point": 52, "time_frame": 360, "timestamp": "2026-03-26T04:36:00Z" }, { "point": 53, "time_frame": 360, "timestamp": "2026-03-26T04:30:00Z" }, { "point": 54, "time_frame": 360, "timestamp": "2026-03-26T04:24:00Z" }, { "point": 55, "time_frame": 360, "timestamp": "2026-03-26T04:18:00Z" }, { "point": 56, "time_frame": 360, "timestamp": "2026-03-26T04:12:00Z" }, { "point": 57, "time_frame": 360, "timestamp": "2026-03-26T04:06:00Z" }, { "point": 58, "time_frame": 360, "timestamp": "2026-03-26T04:00:00Z" }, { "point": 59, "time_frame": 360, "timestamp": "2026-03-26T03:54:00Z" }, { "point": 60, "time_frame": 360, "timestamp": "2026-03-26T03:48:00Z" }, { "point": 61, "time_frame": 360, "timestamp": "2026-03-26T03:42:00Z" }, { "point": 62, "time_frame": 360, "timestamp": "2026-03-26T03:36:00Z" }, { "point": 63, "time_frame": 360, "timestamp": "2026-03-26T03:30:00Z" }, { "point": 64, 
"time_frame": 360, "timestamp": "2026-03-26T03:24:00Z" }, { "point": 65, "time_frame": 360, "timestamp": "2026-03-26T03:18:00Z" }, { "point": 66, "time_frame": 360, "timestamp": "2026-03-26T03:12:00Z" }, { "point": 67, "time_frame": 360, "timestamp": "2026-03-26T03:06:00Z" }, { "point": 68, "time_frame": 360, "timestamp": "2026-03-26T03:00:00Z" }, { "point": 69, "time_frame": 360, "timestamp": "2026-03-26T02:54:00Z" }, { "point": 70, "time_frame": 360, "timestamp": "2026-03-26T02:48:00Z" }, { "point": 71, "time_frame": 360, "timestamp": "2026-03-26T02:42:00Z" }, { "point": 72, "time_frame": 360, "timestamp": "2026-03-26T02:36:00Z" }, { "point": 73, "time_frame": 360, "timestamp": "2026-03-26T02:30:00Z" }, { "point": 74, "time_frame": 360, "timestamp": "2026-03-26T02:24:00Z" }, { "point": 75, "time_frame": 360, "timestamp": "2026-03-26T02:18:00Z" }, { "point": 76, "time_frame": 360, "timestamp": "2026-03-26T02:12:00Z" }, { "point": 77, "time_frame": 360, "timestamp": "2026-03-26T02:06:00Z" }, { "point": 78, "time_frame": 360, "timestamp": "2026-03-26T02:00:00Z" }, { "point": 79, "time_frame": 360, "timestamp": "2026-03-26T01:54:00Z" }, { "point": 80, "time_frame": 360, "timestamp": "2026-03-26T01:48:00Z" }, { "point": 81, "time_frame": 360, "timestamp": "2026-03-26T01:42:00Z" }, { "point": 82, "time_frame": 360, "timestamp": "2026-03-26T01:36:00Z" }, { "point": 83, "time_frame": 360, "timestamp": "2026-03-26T01:30:00Z" }, { "point": 84, "time_frame": 360, "timestamp": "2026-03-26T01:24:00Z" }, { "point": 85, "time_frame": 360, "timestamp": "2026-03-26T01:18:00Z" }, { "point": 86, "time_frame": 360, "timestamp": "2026-03-26T01:12:00Z" }, { "point": 87, "time_frame": 360, "timestamp": "2026-03-26T01:06:00Z" }, { "point": 88, "time_frame": 360, "timestamp": "2026-03-26T01:00:00Z" }, { "point": 89, "time_frame": 360, "timestamp": "2026-03-26T00:54:00Z" }, { "point": 90, "time_frame": 360, "timestamp": "2026-03-26T00:48:00Z" }, { "point": 91, "time_frame": 360, "timestamp": "2026-03-26T00:42:00Z" }, { "point": 92, "time_frame": 360, "timestamp": "2026-03-26T00:36:00Z" }, { "point": 93, "time_frame": 360, "timestamp": "2026-03-26T00:30:00Z" }, { "point": 94, "time_frame": 360, "timestamp": "2026-03-26T00:24:00Z" }, { "point": 95, "time_frame": 360, "timestamp": "2026-03-26T00:18:00Z" }, { "point": 96, "time_frame": 360, "timestamp": "2026-03-26T00:12:00Z" }, { "point": 97, "time_frame": 360, "timestamp": "2026-03-26T00:06:00Z" }, { "point": 98, "time_frame": 360, "timestamp": "2026-03-26T00:00:00Z" }, { "point": 99, "time_frame": 360, "timestamp": "2026-03-25T23:54:00Z" }, { "point": 100, "time_frame": 360, "timestamp": "2026-03-25T23:48:00Z" }, { "point": 101, "time_frame": 360, "timestamp": "2026-03-25T23:42:00Z" }, { "point": 102, "time_frame": 360, "timestamp": "2026-03-25T23:36:00Z" }, { "point": 103, "time_frame": 360, "timestamp": "2026-03-25T23:30:00Z" }, { "point": 104, "time_frame": 360, "timestamp": "2026-03-25T23:24:00Z" }, { "point": 105, "time_frame": 360, "timestamp": "2026-03-25T23:18:00Z" }, { "point": 106, "time_frame": 360, "timestamp": "2026-03-25T23:12:00Z" }, { "point": 107, "time_frame": 360, "timestamp": "2026-03-25T23:06:00Z" }, { "point": 108, "time_frame": 360, "timestamp": "2026-03-25T23:00:00Z" }, { "point": 109, "time_frame": 360, "timestamp": "2026-03-25T22:54:00Z" }, { "point": 110, "time_frame": 360, "timestamp": "2026-03-25T22:48:00Z" }, { "point": 111, "time_frame": 360, "timestamp": "2026-03-25T22:42:00Z" }, { "point": 112, "time_frame": 360, "timestamp": 
"2026-03-25T22:36:00Z" }, { "point": 113, "time_frame": 360, "timestamp": "2026-03-25T22:30:00Z" }, { "point": 114, "time_frame": 360, "timestamp": "2026-03-25T22:24:00Z" }, { "point": 115, "time_frame": 360, "timestamp": "2026-03-25T22:18:00Z" }, { "point": 116, "time_frame": 360, "timestamp": "2026-03-25T22:12:00Z" }, { "point": 117, "time_frame": 360, "timestamp": "2026-03-25T22:06:00Z" }, { "point": 118, "time_frame": 360, "timestamp": "2026-03-25T22:00:00Z" }, { "point": 119, "time_frame": 360, "timestamp": "2026-03-25T21:54:00Z" } ] != \n\u\l\l ]] + rm -f payload.json + get_qan_values mongodb dev-mongos admin:admin + local service_type=mongodb + local environment=dev-mongos + local user_pass=admin:admin + local start + local end + local endpoint ++ /usr/sbin/date -u -d '-12 hour' +%Y-%m-%dT%H:%M:%S%:z + start=2026-03-25T21:48:12+00:00 ++ /usr/sbin/date -u +%Y-%m-%dT%H:%M:%S%:z + end=2026-03-26T09:48:12+00:00 ++ get_service_endpoint monitoring-service ++ local service=monitoring-service +++ kubectl_bin get service/monitoring-service -o json +++ jq '.status.loadBalancer.ingress[].hostname' ++++ mktemp +++ sed -e 's/^"//; s/"$//;' +++ local LAST_OUT=/tmp/tmp.cLqIUQu0iL ++++ mktemp +++ local LAST_ERR=/tmp/tmp.C0CKkU6Y1v +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.cLqIUQu0iL +++ cat /tmp/tmp.C0CKkU6Y1v +++ rm /tmp/tmp.cLqIUQu0iL /tmp/tmp.C0CKkU6Y1v +++ return 0 ++ local hostname=null ++ '[' -n null -a null '!=' null ']' +++ kubectl_bin get service/monitoring-service -o json +++ jq '.status.loadBalancer.ingress[].ip' +++ sed -e 's/^"//; s/"$//;' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.OuXb7GdWh4 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.7HPbEo8gVJ +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.OuXb7GdWh4 +++ cat /tmp/tmp.7HPbEo8gVJ +++ rm /tmp/tmp.OuXb7GdWh4 /tmp/tmp.7HPbEo8gVJ +++ return 0 ++ local ip=34.30.40.245 ++ '[' -n 34.30.40.245 -a 34.30.40.245 '!=' null ']' ++ echo 34.30.40.245 ++ return + endpoint=34.30.40.245 + cat + local response + retry=0 ++ curl -s -k -XPOST -d @payload.json https://admin:admin@34.30.40.245/v0/qan/GetReport ++ jq '.rows[].sparkline' + [[ [ { "time_frame": 360, "timestamp": "2026-03-26T09:48:00Z" }, { "point": 1, "time_frame": 360, "timestamp": "2026-03-26T09:42:00Z" }, { "point": 2, "time_frame": 360, "timestamp": "2026-03-26T09:36:00Z" }, { "point": 3, "time_frame": 360, "timestamp": "2026-03-26T09:30:00Z" }, { "point": 4, "time_frame": 360, "timestamp": "2026-03-26T09:24:00Z" }, { "point": 5, "time_frame": 360, "timestamp": "2026-03-26T09:18:00Z" }, { "point": 6, "time_frame": 360, "timestamp": "2026-03-26T09:12:00Z" }, { "point": 7, "time_frame": 360, "timestamp": "2026-03-26T09:06:00Z" }, { "point": 8, "time_frame": 360, "timestamp": "2026-03-26T09:00:00Z" }, { "point": 9, "time_frame": 360, "timestamp": "2026-03-26T08:54:00Z" }, { "point": 10, "time_frame": 360, "timestamp": "2026-03-26T08:48:00Z" }, { "point": 11, "time_frame": 360, "timestamp": "2026-03-26T08:42:00Z" }, { "point": 12, "time_frame": 360, "timestamp": "2026-03-26T08:36:00Z" }, { "point": 13, "time_frame": 360, "timestamp": "2026-03-26T08:30:00Z" }, { "point": 14, "time_frame": 360, "timestamp": "2026-03-26T08:24:00Z" }, { 
"point": 15, "time_frame": 360, "timestamp": "2026-03-26T08:18:00Z" }, { "point": 16, "time_frame": 360, "timestamp": "2026-03-26T08:12:00Z" }, { "point": 17, "time_frame": 360, "timestamp": "2026-03-26T08:06:00Z" }, { "point": 18, "time_frame": 360, "timestamp": "2026-03-26T08:00:00Z" }, { "point": 19, "time_frame": 360, "timestamp": "2026-03-26T07:54:00Z" }, { "point": 20, "time_frame": 360, "timestamp": "2026-03-26T07:48:00Z" }, { "point": 21, "time_frame": 360, "timestamp": "2026-03-26T07:42:00Z" }, { "point": 22, "time_frame": 360, "timestamp": "2026-03-26T07:36:00Z" }, { "point": 23, "time_frame": 360, "timestamp": "2026-03-26T07:30:00Z" }, { "point": 24, "time_frame": 360, "timestamp": "2026-03-26T07:24:00Z" }, { "point": 25, "time_frame": 360, "timestamp": "2026-03-26T07:18:00Z" }, { "point": 26, "time_frame": 360, "timestamp": "2026-03-26T07:12:00Z" }, { "point": 27, "time_frame": 360, "timestamp": "2026-03-26T07:06:00Z" }, { "point": 28, "time_frame": 360, "timestamp": "2026-03-26T07:00:00Z" }, { "point": 29, "time_frame": 360, "timestamp": "2026-03-26T06:54:00Z" }, { "point": 30, "time_frame": 360, "timestamp": "2026-03-26T06:48:00Z" }, { "point": 31, "time_frame": 360, "timestamp": "2026-03-26T06:42:00Z" }, { "point": 32, "time_frame": 360, "timestamp": "2026-03-26T06:36:00Z" }, { "point": 33, "time_frame": 360, "timestamp": "2026-03-26T06:30:00Z" }, { "point": 34, "time_frame": 360, "timestamp": "2026-03-26T06:24:00Z" }, { "point": 35, "time_frame": 360, "timestamp": "2026-03-26T06:18:00Z" }, { "point": 36, "time_frame": 360, "timestamp": "2026-03-26T06:12:00Z" }, { "point": 37, "time_frame": 360, "timestamp": "2026-03-26T06:06:00Z" }, { "point": 38, "time_frame": 360, "timestamp": "2026-03-26T06:00:00Z" }, { "point": 39, "time_frame": 360, "timestamp": "2026-03-26T05:54:00Z" }, { "point": 40, "time_frame": 360, "timestamp": "2026-03-26T05:48:00Z" }, { "point": 41, "time_frame": 360, "timestamp": "2026-03-26T05:42:00Z" }, { "point": 42, "time_frame": 360, "timestamp": "2026-03-26T05:36:00Z" }, { "point": 43, "time_frame": 360, "timestamp": "2026-03-26T05:30:00Z" }, { "point": 44, "time_frame": 360, "timestamp": "2026-03-26T05:24:00Z" }, { "point": 45, "time_frame": 360, "timestamp": "2026-03-26T05:18:00Z" }, { "point": 46, "time_frame": 360, "timestamp": "2026-03-26T05:12:00Z" }, { "point": 47, "time_frame": 360, "timestamp": "2026-03-26T05:06:00Z" }, { "point": 48, "time_frame": 360, "timestamp": "2026-03-26T05:00:00Z" }, { "point": 49, "time_frame": 360, "timestamp": "2026-03-26T04:54:00Z" }, { "point": 50, "time_frame": 360, "timestamp": "2026-03-26T04:48:00Z" }, { "point": 51, "time_frame": 360, "timestamp": "2026-03-26T04:42:00Z" }, { "point": 52, "time_frame": 360, "timestamp": "2026-03-26T04:36:00Z" }, { "point": 53, "time_frame": 360, "timestamp": "2026-03-26T04:30:00Z" }, { "point": 54, "time_frame": 360, "timestamp": "2026-03-26T04:24:00Z" }, { "point": 55, "time_frame": 360, "timestamp": "2026-03-26T04:18:00Z" }, { "point": 56, "time_frame": 360, "timestamp": "2026-03-26T04:12:00Z" }, { "point": 57, "time_frame": 360, "timestamp": "2026-03-26T04:06:00Z" }, { "point": 58, "time_frame": 360, "timestamp": "2026-03-26T04:00:00Z" }, { "point": 59, "time_frame": 360, "timestamp": "2026-03-26T03:54:00Z" }, { "point": 60, "time_frame": 360, "timestamp": "2026-03-26T03:48:00Z" }, { "point": 61, "time_frame": 360, "timestamp": "2026-03-26T03:42:00Z" }, { "point": 62, "time_frame": 360, "timestamp": "2026-03-26T03:36:00Z" }, { "point": 63, "time_frame": 360, "timestamp": 
"2026-03-26T03:30:00Z" }, { "point": 64, "time_frame": 360, "timestamp": "2026-03-26T03:24:00Z" }, { "point": 65, "time_frame": 360, "timestamp": "2026-03-26T03:18:00Z" }, { "point": 66, "time_frame": 360, "timestamp": "2026-03-26T03:12:00Z" }, { "point": 67, "time_frame": 360, "timestamp": "2026-03-26T03:06:00Z" }, { "point": 68, "time_frame": 360, "timestamp": "2026-03-26T03:00:00Z" }, { "point": 69, "time_frame": 360, "timestamp": "2026-03-26T02:54:00Z" }, { "point": 70, "time_frame": 360, "timestamp": "2026-03-26T02:48:00Z" }, { "point": 71, "time_frame": 360, "timestamp": "2026-03-26T02:42:00Z" }, { "point": 72, "time_frame": 360, "timestamp": "2026-03-26T02:36:00Z" }, { "point": 73, "time_frame": 360, "timestamp": "2026-03-26T02:30:00Z" }, { "point": 74, "time_frame": 360, "timestamp": "2026-03-26T02:24:00Z" }, { "point": 75, "time_frame": 360, "timestamp": "2026-03-26T02:18:00Z" }, { "point": 76, "time_frame": 360, "timestamp": "2026-03-26T02:12:00Z" }, { "point": 77, "time_frame": 360, "timestamp": "2026-03-26T02:06:00Z" }, { "point": 78, "time_frame": 360, "timestamp": "2026-03-26T02:00:00Z" }, { "point": 79, "time_frame": 360, "timestamp": "2026-03-26T01:54:00Z" }, { "point": 80, "time_frame": 360, "timestamp": "2026-03-26T01:48:00Z" }, { "point": 81, "time_frame": 360, "timestamp": "2026-03-26T01:42:00Z" }, { "point": 82, "time_frame": 360, "timestamp": "2026-03-26T01:36:00Z" }, { "point": 83, "time_frame": 360, "timestamp": "2026-03-26T01:30:00Z" }, { "point": 84, "time_frame": 360, "timestamp": "2026-03-26T01:24:00Z" }, { "point": 85, "time_frame": 360, "timestamp": "2026-03-26T01:18:00Z" }, { "point": 86, "time_frame": 360, "timestamp": "2026-03-26T01:12:00Z" }, { "point": 87, "time_frame": 360, "timestamp": "2026-03-26T01:06:00Z" }, { "point": 88, "time_frame": 360, "timestamp": "2026-03-26T01:00:00Z" }, { "point": 89, "time_frame": 360, "timestamp": "2026-03-26T00:54:00Z" }, { "point": 90, "time_frame": 360, "timestamp": "2026-03-26T00:48:00Z" }, { "point": 91, "time_frame": 360, "timestamp": "2026-03-26T00:42:00Z" }, { "point": 92, "time_frame": 360, "timestamp": "2026-03-26T00:36:00Z" }, { "point": 93, "time_frame": 360, "timestamp": "2026-03-26T00:30:00Z" }, { "point": 94, "time_frame": 360, "timestamp": "2026-03-26T00:24:00Z" }, { "point": 95, "time_frame": 360, "timestamp": "2026-03-26T00:18:00Z" }, { "point": 96, "time_frame": 360, "timestamp": "2026-03-26T00:12:00Z" }, { "point": 97, "time_frame": 360, "timestamp": "2026-03-26T00:06:00Z" }, { "point": 98, "time_frame": 360, "timestamp": "2026-03-26T00:00:00Z" }, { "point": 99, "time_frame": 360, "timestamp": "2026-03-25T23:54:00Z" }, { "point": 100, "time_frame": 360, "timestamp": "2026-03-25T23:48:00Z" }, { "point": 101, "time_frame": 360, "timestamp": "2026-03-25T23:42:00Z" }, { "point": 102, "time_frame": 360, "timestamp": "2026-03-25T23:36:00Z" }, { "point": 103, "time_frame": 360, "timestamp": "2026-03-25T23:30:00Z" }, { "point": 104, "time_frame": 360, "timestamp": "2026-03-25T23:24:00Z" }, { "point": 105, "time_frame": 360, "timestamp": "2026-03-25T23:18:00Z" }, { "point": 106, "time_frame": 360, "timestamp": "2026-03-25T23:12:00Z" }, { "point": 107, "time_frame": 360, "timestamp": "2026-03-25T23:06:00Z" }, { "point": 108, "time_frame": 360, "timestamp": "2026-03-25T23:00:00Z" }, { "point": 109, "time_frame": 360, "timestamp": "2026-03-25T22:54:00Z" }, { "point": 110, "time_frame": 360, "timestamp": "2026-03-25T22:48:00Z" }, { "point": 111, "time_frame": 360, "timestamp": "2026-03-25T22:42:00Z" }, { "point": 
112, "time_frame": 360, "timestamp": "2026-03-25T22:36:00Z" }, { "point": 113, "time_frame": 360, "timestamp": "2026-03-25T22:30:00Z" }, { "point": 114, "time_frame": 360, "timestamp": "2026-03-25T22:24:00Z" }, { "point": 115, "time_frame": 360, "timestamp": "2026-03-25T22:18:00Z" }, { "point": 116, "time_frame": 360, "timestamp": "2026-03-25T22:12:00Z" }, { "point": 117, "time_frame": 360, "timestamp": "2026-03-25T22:06:00Z" }, { "point": 118, "time_frame": 360, "timestamp": "2026-03-25T22:00:00Z" }, { "point": 119, "time_frame": 360, "timestamp": "2026-03-25T21:54:00Z" } ] [ { "time_frame": 360, "timestamp": "2026-03-26T09:48:00Z" }, { "point": 1, "time_frame": 360, "timestamp": "2026-03-26T09:42:00Z" }, { "point": 2, "time_frame": 360, "timestamp": "2026-03-26T09:36:00Z" }, { "point": 3, "time_frame": 360, "timestamp": "2026-03-26T09:30:00Z" }, { "point": 4, "time_frame": 360, "timestamp": "2026-03-26T09:24:00Z" }, { "point": 5, "time_frame": 360, "timestamp": "2026-03-26T09:18:00Z" }, { "point": 6, "time_frame": 360, "timestamp": "2026-03-26T09:12:00Z" }, { "point": 7, "time_frame": 360, "timestamp": "2026-03-26T09:06:00Z" }, { "point": 8, "time_frame": 360, "timestamp": "2026-03-26T09:00:00Z" }, { "point": 9, "time_frame": 360, "timestamp": "2026-03-26T08:54:00Z" }, { "point": 10, "time_frame": 360, "timestamp": "2026-03-26T08:48:00Z" }, { "point": 11, "time_frame": 360, "timestamp": "2026-03-26T08:42:00Z" }, { "point": 12, "time_frame": 360, "timestamp": "2026-03-26T08:36:00Z" }, { "point": 13, "time_frame": 360, "timestamp": "2026-03-26T08:30:00Z" }, { "point": 14, "time_frame": 360, "timestamp": "2026-03-26T08:24:00Z" }, { "point": 15, "time_frame": 360, "timestamp": "2026-03-26T08:18:00Z" }, { "point": 16, "time_frame": 360, "timestamp": "2026-03-26T08:12:00Z" }, { "point": 17, "time_frame": 360, "timestamp": "2026-03-26T08:06:00Z" }, { "point": 18, "time_frame": 360, "timestamp": "2026-03-26T08:00:00Z" }, { "point": 19, "time_frame": 360, "timestamp": "2026-03-26T07:54:00Z" }, { "point": 20, "time_frame": 360, "timestamp": "2026-03-26T07:48:00Z" }, { "point": 21, "time_frame": 360, "timestamp": "2026-03-26T07:42:00Z" }, { "point": 22, "time_frame": 360, "timestamp": "2026-03-26T07:36:00Z" }, { "point": 23, "time_frame": 360, "timestamp": "2026-03-26T07:30:00Z" }, { "point": 24, "time_frame": 360, "timestamp": "2026-03-26T07:24:00Z" }, { "point": 25, "time_frame": 360, "timestamp": "2026-03-26T07:18:00Z" }, { "point": 26, "time_frame": 360, "timestamp": "2026-03-26T07:12:00Z" }, { "point": 27, "time_frame": 360, "timestamp": "2026-03-26T07:06:00Z" }, { "point": 28, "time_frame": 360, "timestamp": "2026-03-26T07:00:00Z" }, { "point": 29, "time_frame": 360, "timestamp": "2026-03-26T06:54:00Z" }, { "point": 30, "time_frame": 360, "timestamp": "2026-03-26T06:48:00Z" }, { "point": 31, "time_frame": 360, "timestamp": "2026-03-26T06:42:00Z" }, { "point": 32, "time_frame": 360, "timestamp": "2026-03-26T06:36:00Z" }, { "point": 33, "time_frame": 360, "timestamp": "2026-03-26T06:30:00Z" }, { "point": 34, "time_frame": 360, "timestamp": "2026-03-26T06:24:00Z" }, { "point": 35, "time_frame": 360, "timestamp": "2026-03-26T06:18:00Z" }, { "point": 36, "time_frame": 360, "timestamp": "2026-03-26T06:12:00Z" }, { "point": 37, "time_frame": 360, "timestamp": "2026-03-26T06:06:00Z" }, { "point": 38, "time_frame": 360, "timestamp": "2026-03-26T06:00:00Z" }, { "point": 39, "time_frame": 360, "timestamp": "2026-03-26T05:54:00Z" }, { "point": 40, "time_frame": 360, "timestamp": "2026-03-26T05:48:00Z" }, 
{ "point": 41, "time_frame": 360, "timestamp": "2026-03-26T05:42:00Z" }, { "point": 42, "time_frame": 360, "timestamp": "2026-03-26T05:36:00Z" }, { "point": 43, "time_frame": 360, "timestamp": "2026-03-26T05:30:00Z" }, { "point": 44, "time_frame": 360, "timestamp": "2026-03-26T05:24:00Z" }, { "point": 45, "time_frame": 360, "timestamp": "2026-03-26T05:18:00Z" }, { "point": 46, "time_frame": 360, "timestamp": "2026-03-26T05:12:00Z" }, { "point": 47, "time_frame": 360, "timestamp": "2026-03-26T05:06:00Z" }, { "point": 48, "time_frame": 360, "timestamp": "2026-03-26T05:00:00Z" }, { "point": 49, "time_frame": 360, "timestamp": "2026-03-26T04:54:00Z" }, { "point": 50, "time_frame": 360, "timestamp": "2026-03-26T04:48:00Z" }, { "point": 51, "time_frame": 360, "timestamp": "2026-03-26T04:42:00Z" }, { "point": 52, "time_frame": 360, "timestamp": "2026-03-26T04:36:00Z" }, { "point": 53, "time_frame": 360, "timestamp": "2026-03-26T04:30:00Z" }, { "point": 54, "time_frame": 360, "timestamp": "2026-03-26T04:24:00Z" }, { "point": 55, "time_frame": 360, "timestamp": "2026-03-26T04:18:00Z" }, { "point": 56, "time_frame": 360, "timestamp": "2026-03-26T04:12:00Z" }, { "point": 57, "time_frame": 360, "timestamp": "2026-03-26T04:06:00Z" }, { "point": 58, "time_frame": 360, "timestamp": "2026-03-26T04:00:00Z" }, { "point": 59, "time_frame": 360, "timestamp": "2026-03-26T03:54:00Z" }, { "point": 60, "time_frame": 360, "timestamp": "2026-03-26T03:48:00Z" }, { "point": 61, "time_frame": 360, "timestamp": "2026-03-26T03:42:00Z" }, { "point": 62, "time_frame": 360, "timestamp": "2026-03-26T03:36:00Z" }, { "point": 63, "time_frame": 360, "timestamp": "2026-03-26T03:30:00Z" }, { "point": 64, "time_frame": 360, "timestamp": "2026-03-26T03:24:00Z" }, { "point": 65, "time_frame": 360, "timestamp": "2026-03-26T03:18:00Z" }, { "point": 66, "time_frame": 360, "timestamp": "2026-03-26T03:12:00Z" }, { "point": 67, "time_frame": 360, "timestamp": "2026-03-26T03:06:00Z" }, { "point": 68, "time_frame": 360, "timestamp": "2026-03-26T03:00:00Z" }, { "point": 69, "time_frame": 360, "timestamp": "2026-03-26T02:54:00Z" }, { "point": 70, "time_frame": 360, "timestamp": "2026-03-26T02:48:00Z" }, { "point": 71, "time_frame": 360, "timestamp": "2026-03-26T02:42:00Z" }, { "point": 72, "time_frame": 360, "timestamp": "2026-03-26T02:36:00Z" }, { "point": 73, "time_frame": 360, "timestamp": "2026-03-26T02:30:00Z" }, { "point": 74, "time_frame": 360, "timestamp": "2026-03-26T02:24:00Z" }, { "point": 75, "time_frame": 360, "timestamp": "2026-03-26T02:18:00Z" }, { "point": 76, "time_frame": 360, "timestamp": "2026-03-26T02:12:00Z" }, { "point": 77, "time_frame": 360, "timestamp": "2026-03-26T02:06:00Z" }, { "point": 78, "time_frame": 360, "timestamp": "2026-03-26T02:00:00Z" }, { "point": 79, "time_frame": 360, "timestamp": "2026-03-26T01:54:00Z" }, { "point": 80, "time_frame": 360, "timestamp": "2026-03-26T01:48:00Z" }, { "point": 81, "time_frame": 360, "timestamp": "2026-03-26T01:42:00Z" }, { "point": 82, "time_frame": 360, "timestamp": "2026-03-26T01:36:00Z" }, { "point": 83, "time_frame": 360, "timestamp": "2026-03-26T01:30:00Z" }, { "point": 84, "time_frame": 360, "timestamp": "2026-03-26T01:24:00Z" }, { "point": 85, "time_frame": 360, "timestamp": "2026-03-26T01:18:00Z" }, { "point": 86, "time_frame": 360, "timestamp": "2026-03-26T01:12:00Z" }, { "point": 87, "time_frame": 360, "timestamp": "2026-03-26T01:06:00Z" }, { "point": 88, "time_frame": 360, "timestamp": "2026-03-26T01:00:00Z" }, { "point": 89, "time_frame": 360, "timestamp": 
"2026-03-26T00:54:00Z" }, { "point": 90, "time_frame": 360, "timestamp": "2026-03-26T00:48:00Z" }, { "point": 91, "time_frame": 360, "timestamp": "2026-03-26T00:42:00Z" }, { "point": 92, "time_frame": 360, "timestamp": "2026-03-26T00:36:00Z" }, { "point": 93, "time_frame": 360, "timestamp": "2026-03-26T00:30:00Z" }, { "point": 94, "time_frame": 360, "timestamp": "2026-03-26T00:24:00Z" }, { "point": 95, "time_frame": 360, "timestamp": "2026-03-26T00:18:00Z" }, { "point": 96, "time_frame": 360, "timestamp": "2026-03-26T00:12:00Z" }, { "point": 97, "time_frame": 360, "timestamp": "2026-03-26T00:06:00Z" }, { "point": 98, "time_frame": 360, "timestamp": "2026-03-26T00:00:00Z" }, { "point": 99, "time_frame": 360, "timestamp": "2026-03-25T23:54:00Z" }, { "point": 100, "time_frame": 360, "timestamp": "2026-03-25T23:48:00Z" }, { "point": 101, "time_frame": 360, "timestamp": "2026-03-25T23:42:00Z" }, { "point": 102, "time_frame": 360, "timestamp": "2026-03-25T23:36:00Z" }, { "point": 103, "time_frame": 360, "timestamp": "2026-03-25T23:30:00Z" }, { "point": 104, "time_frame": 360, "timestamp": "2026-03-25T23:24:00Z" }, { "point": 105, "time_frame": 360, "timestamp": "2026-03-25T23:18:00Z" }, { "point": 106, "time_frame": 360, "timestamp": "2026-03-25T23:12:00Z" }, { "point": 107, "time_frame": 360, "timestamp": "2026-03-25T23:06:00Z" }, { "point": 108, "time_frame": 360, "timestamp": "2026-03-25T23:00:00Z" }, { "point": 109, "time_frame": 360, "timestamp": "2026-03-25T22:54:00Z" }, { "point": 110, "time_frame": 360, "timestamp": "2026-03-25T22:48:00Z" }, { "point": 111, "time_frame": 360, "timestamp": "2026-03-25T22:42:00Z" }, { "point": 112, "time_frame": 360, "timestamp": "2026-03-25T22:36:00Z" }, { "point": 113, "time_frame": 360, "timestamp": "2026-03-25T22:30:00Z" }, { "point": 114, "time_frame": 360, "timestamp": "2026-03-25T22:24:00Z" }, { "point": 115, "time_frame": 360, "timestamp": "2026-03-25T22:18:00Z" }, { "point": 116, "time_frame": 360, "timestamp": "2026-03-25T22:12:00Z" }, { "point": 117, "time_frame": 360, "timestamp": "2026-03-25T22:06:00Z" }, { "point": 118, "time_frame": 360, "timestamp": "2026-03-25T22:00:00Z" }, { "point": 119, "time_frame": 360, "timestamp": "2026-03-25T21:54:00Z" } ] != \n\u\l\l ]] + rm -f payload.json + nodeList=($(get_node_id_from_pmm)) ++ get_node_id_from_pmm ++ nodeList=() ++ local -a nodeList +++ kubectl_bin get pods --no-headers -l app.kubernetes.io/name=percona-server-mongodb --output=custom-columns=NAME:.metadata.name ++++ mktemp +++ local LAST_OUT=/tmp/tmp.JOE8TKYovw ++++ mktemp +++ local LAST_ERR=/tmp/tmp.nIp3J9LbLt +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get pods --no-headers -l app.kubernetes.io/name=percona-server-mongodb --output=custom-columns=NAME:.metadata.name +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.JOE8TKYovw +++ cat /tmp/tmp.nIp3J9LbLt +++ rm /tmp/tmp.JOE8TKYovw /tmp/tmp.nIp3J9LbLt +++ return 0 ++ for instance in $(kubectl_bin get pods --no-headers -l app.kubernetes.io/name=percona-server-mongodb --output=custom-columns='NAME:.metadata.name') ++ nodeList+=($(kubectl_bin exec -n "$namespace" $instance -c pmm-client -- pmm-admin status --json | jq -r '.pmm_agent_status.node_id')) +++ kubectl_bin exec -n monitoring-2-0-24852 monitoring-cfg-0 -c pmm-client -- pmm-admin status --json +++ jq -r .pmm_agent_status.node_id ++++ mktemp +++ local LAST_OUT=/tmp/tmp.Nhryg6cKpq ++++ mktemp +++ local LAST_ERR=/tmp/tmp.TMK7rcL035 +++ local 
exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec -n monitoring-2-0-24852 monitoring-cfg-0 -c pmm-client -- pmm-admin status --json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.Nhryg6cKpq +++ cat /tmp/tmp.TMK7rcL035 +++ rm /tmp/tmp.Nhryg6cKpq /tmp/tmp.TMK7rcL035 +++ return 0 ++ for instance in $(kubectl_bin get pods --no-headers -l app.kubernetes.io/name=percona-server-mongodb --output=custom-columns='NAME:.metadata.name') ++ nodeList+=($(kubectl_bin exec -n "$namespace" $instance -c pmm-client -- pmm-admin status --json | jq -r '.pmm_agent_status.node_id')) +++ kubectl_bin exec -n monitoring-2-0-24852 monitoring-cfg-1 -c pmm-client -- pmm-admin status --json +++ jq -r .pmm_agent_status.node_id ++++ mktemp +++ local LAST_OUT=/tmp/tmp.i9hnMoasv4 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.X5fqqUOEu6 +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec -n monitoring-2-0-24852 monitoring-cfg-1 -c pmm-client -- pmm-admin status --json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.i9hnMoasv4 +++ cat /tmp/tmp.X5fqqUOEu6 +++ rm /tmp/tmp.i9hnMoasv4 /tmp/tmp.X5fqqUOEu6 +++ return 0 ++ for instance in $(kubectl_bin get pods --no-headers -l app.kubernetes.io/name=percona-server-mongodb --output=custom-columns='NAME:.metadata.name') ++ nodeList+=($(kubectl_bin exec -n "$namespace" $instance -c pmm-client -- pmm-admin status --json | jq -r '.pmm_agent_status.node_id')) +++ kubectl_bin exec -n monitoring-2-0-24852 monitoring-cfg-2 -c pmm-client -- pmm-admin status --json +++ jq -r .pmm_agent_status.node_id ++++ mktemp +++ local LAST_OUT=/tmp/tmp.Zy56bmIoGT ++++ mktemp +++ local LAST_ERR=/tmp/tmp.yapncQDlvd +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec -n monitoring-2-0-24852 monitoring-cfg-2 -c pmm-client -- pmm-admin status --json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.Zy56bmIoGT +++ cat /tmp/tmp.yapncQDlvd +++ rm /tmp/tmp.Zy56bmIoGT /tmp/tmp.yapncQDlvd +++ return 0 ++ for instance in $(kubectl_bin get pods --no-headers -l app.kubernetes.io/name=percona-server-mongodb --output=custom-columns='NAME:.metadata.name') ++ nodeList+=($(kubectl_bin exec -n "$namespace" $instance -c pmm-client -- pmm-admin status --json | jq -r '.pmm_agent_status.node_id')) +++ kubectl_bin exec -n monitoring-2-0-24852 monitoring-mongos-0 -c pmm-client -- pmm-admin status --json +++ jq -r .pmm_agent_status.node_id ++++ mktemp +++ local LAST_OUT=/tmp/tmp.7eZItqILkN ++++ mktemp +++ local LAST_ERR=/tmp/tmp.W4tKghnf2o +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec -n monitoring-2-0-24852 monitoring-mongos-0 -c pmm-client -- pmm-admin status --json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.7eZItqILkN +++ cat /tmp/tmp.W4tKghnf2o +++ rm /tmp/tmp.7eZItqILkN /tmp/tmp.W4tKghnf2o +++ return 0 ++ for instance in $(kubectl_bin get pods --no-headers -l app.kubernetes.io/name=percona-server-mongodb --output=custom-columns='NAME:.metadata.name') ++ nodeList+=($(kubectl_bin exec -n "$namespace" $instance -c pmm-client -- pmm-admin status --json | jq -r '.pmm_agent_status.node_id')) +++ kubectl_bin exec -n monitoring-2-0-24852 monitoring-mongos-1 -c pmm-client -- pmm-admin status --json ++++ mktemp +++ jq -r .pmm_agent_status.node_id +++ local 
LAST_OUT=/tmp/tmp.5eVMUiFi1V ++++ mktemp +++ local LAST_ERR=/tmp/tmp.qDtB5c2XXY +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec -n monitoring-2-0-24852 monitoring-mongos-1 -c pmm-client -- pmm-admin status --json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.5eVMUiFi1V +++ cat /tmp/tmp.qDtB5c2XXY +++ rm /tmp/tmp.5eVMUiFi1V /tmp/tmp.qDtB5c2XXY +++ return 0 ++ for instance in $(kubectl_bin get pods --no-headers -l app.kubernetes.io/name=percona-server-mongodb --output=custom-columns='NAME:.metadata.name') ++ nodeList+=($(kubectl_bin exec -n "$namespace" $instance -c pmm-client -- pmm-admin status --json | jq -r '.pmm_agent_status.node_id')) +++ kubectl_bin exec -n monitoring-2-0-24852 monitoring-mongos-2 -c pmm-client -- pmm-admin status --json +++ jq -r .pmm_agent_status.node_id ++++ mktemp +++ local LAST_OUT=/tmp/tmp.Ounq74DGqK ++++ mktemp +++ local LAST_ERR=/tmp/tmp.Emfhu2xQEU +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec -n monitoring-2-0-24852 monitoring-mongos-2 -c pmm-client -- pmm-admin status --json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.Ounq74DGqK +++ cat /tmp/tmp.Emfhu2xQEU +++ rm /tmp/tmp.Ounq74DGqK /tmp/tmp.Emfhu2xQEU +++ return 0 ++ for instance in $(kubectl_bin get pods --no-headers -l app.kubernetes.io/name=percona-server-mongodb --output=custom-columns='NAME:.metadata.name') ++ nodeList+=($(kubectl_bin exec -n "$namespace" $instance -c pmm-client -- pmm-admin status --json | jq -r '.pmm_agent_status.node_id')) +++ jq -r .pmm_agent_status.node_id +++ kubectl_bin exec -n monitoring-2-0-24852 monitoring-rs0-0 -c pmm-client -- pmm-admin status --json ++++ mktemp +++ local LAST_OUT=/tmp/tmp.aKXWuneONX ++++ mktemp +++ local LAST_ERR=/tmp/tmp.RIPP05OoKN +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec -n monitoring-2-0-24852 monitoring-rs0-0 -c pmm-client -- pmm-admin status --json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.aKXWuneONX +++ cat /tmp/tmp.RIPP05OoKN +++ rm /tmp/tmp.aKXWuneONX /tmp/tmp.RIPP05OoKN +++ return 0 ++ for instance in $(kubectl_bin get pods --no-headers -l app.kubernetes.io/name=percona-server-mongodb --output=custom-columns='NAME:.metadata.name') ++ nodeList+=($(kubectl_bin exec -n "$namespace" $instance -c pmm-client -- pmm-admin status --json | jq -r '.pmm_agent_status.node_id')) +++ kubectl_bin exec -n monitoring-2-0-24852 monitoring-rs0-1 -c pmm-client -- pmm-admin status --json +++ jq -r .pmm_agent_status.node_id ++++ mktemp +++ local LAST_OUT=/tmp/tmp.nbAE7MhI1S ++++ mktemp +++ local LAST_ERR=/tmp/tmp.9zcGOPFlkU +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec -n monitoring-2-0-24852 monitoring-rs0-1 -c pmm-client -- pmm-admin status --json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.nbAE7MhI1S +++ cat /tmp/tmp.9zcGOPFlkU +++ rm /tmp/tmp.nbAE7MhI1S /tmp/tmp.9zcGOPFlkU +++ return 0 ++ for instance in $(kubectl_bin get pods --no-headers -l app.kubernetes.io/name=percona-server-mongodb --output=custom-columns='NAME:.metadata.name') ++ nodeList+=($(kubectl_bin exec -n "$namespace" $instance -c pmm-client -- pmm-admin status --json | jq -r '.pmm_agent_status.node_id')) +++ kubectl_bin exec -n monitoring-2-0-24852 monitoring-rs0-2 -c pmm-client -- 
pmm-admin status --json +++ jq -r .pmm_agent_status.node_id ++++ mktemp +++ local LAST_OUT=/tmp/tmp.d4zSIPHvV9 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.qsGZKlKg85 +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec -n monitoring-2-0-24852 monitoring-rs0-2 -c pmm-client -- pmm-admin status --json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.d4zSIPHvV9 +++ cat /tmp/tmp.qsGZKlKg85 +++ rm /tmp/tmp.d4zSIPHvV9 /tmp/tmp.qsGZKlKg85 +++ return 0 ++ echo /node_id/28640ebe-d995-4112-a260-4cf68f407df7 /node_id/966badb4-70cc-49fb-a038-d82deec37007 /node_id/b823740a-addf-47d7-a8a4-c9996a727f4c /node_id/fe2f5228-5fff-406b-850d-5e628a437cf8 /node_id/45626b15-efd5-4b89-9f27-097301a33fc2 /node_id/ea395276-6c73-4aa9-88da-3371bcda54d2 /node_id/e6a28bfe-b2f9-4ff1-8d21-963d70b4d373 /node_id/7975fea8-f43c-4a74-b05a-a4a0b27d2283 /node_id/9a7b7267-4548-4b17-a155-f5f2c9037f7d + nodeList_from_pmm=($(does_node_id_exists "${nodeList[@]}")) ++ does_node_id_exists /node_id/28640ebe-d995-4112-a260-4cf68f407df7 /node_id/966badb4-70cc-49fb-a038-d82deec37007 /node_id/b823740a-addf-47d7-a8a4-c9996a727f4c /node_id/fe2f5228-5fff-406b-850d-5e628a437cf8 /node_id/45626b15-efd5-4b89-9f27-097301a33fc2 /node_id/ea395276-6c73-4aa9-88da-3371bcda54d2 /node_id/e6a28bfe-b2f9-4ff1-8d21-963d70b4d373 /node_id/7975fea8-f43c-4a74-b05a-a4a0b27d2283 /node_id/9a7b7267-4548-4b17-a155-f5f2c9037f7d ++ nodeList=('/node_id/28640ebe-d995-4112-a260-4cf68f407df7' '/node_id/966badb4-70cc-49fb-a038-d82deec37007' '/node_id/b823740a-addf-47d7-a8a4-c9996a727f4c' '/node_id/fe2f5228-5fff-406b-850d-5e628a437cf8' '/node_id/45626b15-efd5-4b89-9f27-097301a33fc2' '/node_id/ea395276-6c73-4aa9-88da-3371bcda54d2' '/node_id/e6a28bfe-b2f9-4ff1-8d21-963d70b4d373' '/node_id/7975fea8-f43c-4a74-b05a-a4a0b27d2283' '/node_id/9a7b7267-4548-4b17-a155-f5f2c9037f7d') ++ local -a nodeList ++ nodeList_from_pmm=() ++ local -a nodeList_from_pmm ++ for node_id in "${nodeList[@]}" ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-0 -- pmm-admin --server-url=https://admin:admin@$(get_pmm_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ grep /node_id/28640ebe-d995-4112-a260-4cf68f407df7 +++ awk '{print $4}' ++++ get_pmm_service_ip monitoring-service ++++ local service=monitoring-service ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.spec.type}' ++++ grep -q NotFound ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[]}' ++++ grep -E -q 'hostname|ip' ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.i3t4qPW4zv +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.n5SVKD9DFZ ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.i3t4qPW4zv ++++ cat /tmp/tmp.n5SVKD9DFZ ++++ rm /tmp/tmp.i3t4qPW4zv /tmp/tmp.n5SVKD9DFZ ++++ return 0 ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.qYRud0WRBw +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.7wXlL4yMTP ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e 
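The two get_qan_values checks traced above boil down to the following minimal sketch, assuming PMM's /v0/qan/GetReport endpoint and a pre-built payload.json (the heredoc that builds it is collapsed to '+ cat' in the trace, so its fields are not shown in this log):

# Resolve the PMM server's LoadBalancer address, as get_service_endpoint does.
endpoint=$(kubectl get service/monitoring-service -o jsonpath='{.status.loadBalancer.ingress[].ip}')
# POST the QAN report query and keep only the per-row sparkline arrays.
response=$(curl -s -k -XPOST -d @payload.json "https://admin:admin@${endpoint}/v0/qan/GetReport" | jq '.rows[].sparkline')
# The check passes when QAN returned real series rather than null.
[[ ${response} != null ]] && echo "QAN sparkline data present"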
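The get_node_id_from_pmm loop traced above reduces to the sketch below; the namespace is the one used in this run, and the commands mirror the trace, with the namespace made explicit:

namespace=monitoring-2-0-24852
nodeList=()
# Walk every PSMDB pod and ask its pmm-client sidecar which node_id it
# registered with the PMM server.
for instance in $(kubectl get pods -n "${namespace}" --no-headers -l app.kubernetes.io/name=percona-server-mongodb --output=custom-columns='NAME:.metadata.name'); do
    nodeList+=("$(kubectl exec -n "${namespace}" "${instance}" -c pmm-client -- pmm-admin status --json | jq -r '.pmm_agent_status.node_id')")
done
echo "${nodeList[@]}"   # nine /node_id/... values in this run, one per pod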
++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.qYRud0WRBw ++++ cat /tmp/tmp.7wXlL4yMTP ++++ rm /tmp/tmp.qYRud0WRBw /tmp/tmp.7wXlL4yMTP ++++ return 0 +++ kubectl_bin exec -n monitoring-2-0-24852 monitoring-0 -- pmm-admin --server-url=https://admin:admin@34.30.40.245/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.Lf6gtmrwZs ++++ mktemp +++ local LAST_ERR=/tmp/tmp.EE7LSkrcBc +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec -n monitoring-2-0-24852 monitoring-0 -- pmm-admin --server-url=https://admin:admin@34.30.40.245/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.Lf6gtmrwZs +++ cat /tmp/tmp.EE7LSkrcBc +++ rm /tmp/tmp.Lf6gtmrwZs /tmp/tmp.EE7LSkrcBc +++ return 0 ++ for node_id in "${nodeList[@]}" ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-0 -- pmm-admin --server-url=https://admin:admin@$(get_pmm_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ grep /node_id/966badb4-70cc-49fb-a038-d82deec37007 +++ awk '{print $4}' ++++ get_pmm_service_ip monitoring-service ++++ local service=monitoring-service ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.spec.type}' ++++ grep -q NotFound ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[]}' ++++ grep -E -q 'hostname|ip' ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.AghWEvKlDL +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.DVaKNB6iu6 ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.AghWEvKlDL ++++ cat /tmp/tmp.DVaKNB6iu6 ++++ rm /tmp/tmp.AghWEvKlDL /tmp/tmp.DVaKNB6iu6 ++++ return 0 ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.pLQ8CvVjUi +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.a8qFm66jAY ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.pLQ8CvVjUi ++++ cat /tmp/tmp.a8qFm66jAY ++++ rm /tmp/tmp.pLQ8CvVjUi /tmp/tmp.a8qFm66jAY ++++ return 0 +++ kubectl_bin exec -n monitoring-2-0-24852 monitoring-0 -- pmm-admin --server-url=https://admin:admin@34.30.40.245/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.ljGIUO9qJ4 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.qoXgiOLxKP +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec -n monitoring-2-0-24852 monitoring-0 -- pmm-admin --server-url=https://admin:admin@34.30.40.245/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat 
/tmp/tmp.ljGIUO9qJ4 +++ cat /tmp/tmp.qoXgiOLxKP +++ rm /tmp/tmp.ljGIUO9qJ4 /tmp/tmp.qoXgiOLxKP +++ return 0 ++ for node_id in "${nodeList[@]}" ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-0 -- pmm-admin --server-url=https://admin:admin@$(get_pmm_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ grep /node_id/b823740a-addf-47d7-a8a4-c9996a727f4c +++ awk '{print $4}' ++++ get_pmm_service_ip monitoring-service ++++ local service=monitoring-service ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.spec.type}' ++++ grep -q NotFound ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[]}' ++++ grep -E -q 'hostname|ip' ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.ydUzRNYXSb +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.dUB8Paassu ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.ydUzRNYXSb ++++ cat /tmp/tmp.dUB8Paassu ++++ rm /tmp/tmp.ydUzRNYXSb /tmp/tmp.dUB8Paassu ++++ return 0 ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.texfyikrtD +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.c0bWvZD1eN ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.texfyikrtD ++++ cat /tmp/tmp.c0bWvZD1eN ++++ rm /tmp/tmp.texfyikrtD /tmp/tmp.c0bWvZD1eN ++++ return 0 +++ kubectl_bin exec -n monitoring-2-0-24852 monitoring-0 -- pmm-admin --server-url=https://admin:admin@34.30.40.245/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.2w1Rx7l1c5 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.4as8sN79Tv +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec -n monitoring-2-0-24852 monitoring-0 -- pmm-admin --server-url=https://admin:admin@34.30.40.245/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.2w1Rx7l1c5 +++ cat /tmp/tmp.4as8sN79Tv +++ rm /tmp/tmp.2w1Rx7l1c5 /tmp/tmp.4as8sN79Tv +++ return 0 ++ for node_id in "${nodeList[@]}" ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-0 -- pmm-admin --server-url=https://admin:admin@$(get_pmm_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ grep /node_id/fe2f5228-5fff-406b-850d-5e628a437cf8 ++++ get_pmm_service_ip monitoring-service ++++ local service=monitoring-service +++ awk '{print $4}' ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.spec.type}' ++++ grep -q NotFound ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[]}' ++++ grep -E -q 'hostname|ip' ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' +++++ mktemp ++++ local 
LAST_OUT=/tmp/tmp.r6sBukHxFi +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.v4cR8qLb83 ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.r6sBukHxFi ++++ cat /tmp/tmp.v4cR8qLb83 ++++ rm /tmp/tmp.r6sBukHxFi /tmp/tmp.v4cR8qLb83 ++++ return 0 ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.hTH3XymWwI +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.hQmzGMmV97 ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.hTH3XymWwI ++++ cat /tmp/tmp.hQmzGMmV97 ++++ rm /tmp/tmp.hTH3XymWwI /tmp/tmp.hQmzGMmV97 ++++ return 0 +++ kubectl_bin exec -n monitoring-2-0-24852 monitoring-0 -- pmm-admin --server-url=https://admin:admin@34.30.40.245/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.n4drNIMLUx ++++ mktemp +++ local LAST_ERR=/tmp/tmp.7zejaaKJlI +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec -n monitoring-2-0-24852 monitoring-0 -- pmm-admin --server-url=https://admin:admin@34.30.40.245/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.n4drNIMLUx +++ cat /tmp/tmp.7zejaaKJlI +++ rm /tmp/tmp.n4drNIMLUx /tmp/tmp.7zejaaKJlI +++ return 0 ++ for node_id in "${nodeList[@]}" ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-0 -- pmm-admin --server-url=https://admin:admin@$(get_pmm_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ grep /node_id/45626b15-efd5-4b89-9f27-097301a33fc2 +++ awk '{print $4}' ++++ get_pmm_service_ip monitoring-service ++++ local service=monitoring-service ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.spec.type}' ++++ grep -q NotFound ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[]}' ++++ grep -E -q 'hostname|ip' ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.PBaYLDKeVA +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.Qjymb7N6Lg ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.PBaYLDKeVA ++++ cat /tmp/tmp.Qjymb7N6Lg ++++ rm /tmp/tmp.PBaYLDKeVA /tmp/tmp.Qjymb7N6Lg ++++ return 0 ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.My7Qsxm3mW +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.yfv2hKzLBN ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' 
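Each iteration of does_node_id_exists being traced here performs the same lookup; a sketch follows. The awk column ($4) is copied from the trace, but the exact column layout of pmm-admin inventory list nodes output is an assumption this log does not confirm:

# Confirm every collected node_id is still present in PMM's inventory of
# container nodes; an empty result means the node was never registered.
for node_id in "${nodeList[@]}"; do
    kubectl exec -n "${namespace}" monitoring-0 -- pmm-admin --server-url="https://admin:admin@${endpoint}/" --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep "${node_id}" | awk '{print $4}'
done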
++++ break ++++ cat /tmp/tmp.My7Qsxm3mW ++++ cat /tmp/tmp.yfv2hKzLBN ++++ rm /tmp/tmp.My7Qsxm3mW /tmp/tmp.yfv2hKzLBN ++++ return 0 +++ kubectl_bin exec -n monitoring-2-0-24852 monitoring-0 -- pmm-admin --server-url=https://admin:admin@34.30.40.245/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.bVILblTcnY ++++ mktemp +++ local LAST_ERR=/tmp/tmp.dVs9LopdqD +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec -n monitoring-2-0-24852 monitoring-0 -- pmm-admin --server-url=https://admin:admin@34.30.40.245/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.bVILblTcnY +++ cat /tmp/tmp.dVs9LopdqD +++ rm /tmp/tmp.bVILblTcnY /tmp/tmp.dVs9LopdqD +++ return 0 ++ for node_id in "${nodeList[@]}" ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-0 -- pmm-admin --server-url=https://admin:admin@$(get_pmm_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ grep /node_id/ea395276-6c73-4aa9-88da-3371bcda54d2 ++++ get_pmm_service_ip monitoring-service ++++ local service=monitoring-service +++ awk '{print $4}' ++++ grep -q NotFound ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.spec.type}' ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[]}' ++++ grep -E -q 'hostname|ip' ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.bKvNMymiUQ +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.VDHfMCewwB ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.bKvNMymiUQ ++++ cat /tmp/tmp.VDHfMCewwB ++++ rm /tmp/tmp.bKvNMymiUQ /tmp/tmp.VDHfMCewwB ++++ return 0 ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.xVk22rd0QQ +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.uTGfOwWS32 ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.xVk22rd0QQ ++++ cat /tmp/tmp.uTGfOwWS32 ++++ rm /tmp/tmp.xVk22rd0QQ /tmp/tmp.uTGfOwWS32 ++++ return 0 +++ kubectl_bin exec -n monitoring-2-0-24852 monitoring-0 -- pmm-admin --server-url=https://admin:admin@34.30.40.245/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.pC3aiXg9DK ++++ mktemp +++ local LAST_ERR=/tmp/tmp.4htPalawNn +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec -n monitoring-2-0-24852 monitoring-0 -- pmm-admin --server-url=https://admin:admin@34.30.40.245/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.pC3aiXg9DK +++ cat /tmp/tmp.4htPalawNn +++ rm /tmp/tmp.pC3aiXg9DK /tmp/tmp.4htPalawNn +++ return 0 ++ for node_id in "${nodeList[@]}" ++ 
nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-0 -- pmm-admin --server-url=https://admin:admin@$(get_pmm_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ grep /node_id/e6a28bfe-b2f9-4ff1-8d21-963d70b4d373 +++ awk '{print $4}' ++++ get_pmm_service_ip monitoring-service ++++ local service=monitoring-service ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.spec.type}' ++++ grep -q NotFound ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[]}' ++++ grep -E -q 'hostname|ip' ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.ftiYvACHZs +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.gw4aatFnc7 ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.ftiYvACHZs ++++ cat /tmp/tmp.gw4aatFnc7 ++++ rm /tmp/tmp.ftiYvACHZs /tmp/tmp.gw4aatFnc7 ++++ return 0 ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.GefgPrMKY0 +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.1cWYMNLLlT ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.GefgPrMKY0 ++++ cat /tmp/tmp.1cWYMNLLlT ++++ rm /tmp/tmp.GefgPrMKY0 /tmp/tmp.1cWYMNLLlT ++++ return 0 +++ kubectl_bin exec -n monitoring-2-0-24852 monitoring-0 -- pmm-admin --server-url=https://admin:admin@34.30.40.245/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.XltHU5PxxD ++++ mktemp +++ local LAST_ERR=/tmp/tmp.CIcOS3r9c6 +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec -n monitoring-2-0-24852 monitoring-0 -- pmm-admin --server-url=https://admin:admin@34.30.40.245/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.XltHU5PxxD +++ cat /tmp/tmp.CIcOS3r9c6 +++ rm /tmp/tmp.XltHU5PxxD /tmp/tmp.CIcOS3r9c6 +++ return 0 ++ for node_id in "${nodeList[@]}" ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-0 -- pmm-admin --server-url=https://admin:admin@$(get_pmm_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ grep /node_id/7975fea8-f43c-4a74-b05a-a4a0b27d2283 +++ awk '{print $4}' ++++ get_pmm_service_ip monitoring-service ++++ local service=monitoring-service ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.spec.type}' ++++ grep -q NotFound ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[]}' ++++ grep -E -q 'hostname|ip' ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.neJ9t2Xcvq +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.Dp1dTKksHQ ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) 
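
Each nested ++++ block above is get_pmm_service_ip resolving the PMM server address from the monitoring-service LoadBalancer, once per node_id. A sketch of the logic the trace implies (the waiting behaviour and the ip-over-hostname preference are assumptions):

    get_pmm_service_ip() {
        local service=$1
        # nothing to resolve if the service does not exist
        if kubectl get "service/$service" -o 'jsonpath={.spec.type}' 2>&1 | grep -q NotFound; then
            return 1
        fi
        # wait until the load balancer has published an ingress entry
        until kubectl get "service/$service" -o 'jsonpath={.status.loadBalancer.ingress[]}' 2>&1 \
            | grep -E -q 'hostname|ip'; do
            sleep 1
        done
        local ip hostname
        ip=$(kubectl get "service/$service" -o 'jsonpath={.status.loadBalancer.ingress[].ip}')
        hostname=$(kubectl get "service/$service" -o 'jsonpath={.status.loadBalancer.ingress[].hostname}')
        # prefer the ip (34.30.40.245 in this run); fall back to hostname-only LBs
        echo "${ip:-$hostname}"
    }

Because the address is re-resolved inside every loop iteration, the same four kubectl queries repeat for each of the nine node ids; caching the result once before the loop would shrink this log considerably.
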
++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.neJ9t2Xcvq ++++ cat /tmp/tmp.Dp1dTKksHQ ++++ rm /tmp/tmp.neJ9t2Xcvq /tmp/tmp.Dp1dTKksHQ ++++ return 0 ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.B1w1CPQQTH +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.LNdc2YvSTe ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.B1w1CPQQTH ++++ cat /tmp/tmp.LNdc2YvSTe ++++ rm /tmp/tmp.B1w1CPQQTH /tmp/tmp.LNdc2YvSTe ++++ return 0 +++ kubectl_bin exec -n monitoring-2-0-24852 monitoring-0 -- pmm-admin --server-url=https://admin:admin@34.30.40.245/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.M8gBumvG1G ++++ mktemp +++ local LAST_ERR=/tmp/tmp.i2y8gNZD02 +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec -n monitoring-2-0-24852 monitoring-0 -- pmm-admin --server-url=https://admin:admin@34.30.40.245/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.M8gBumvG1G +++ cat /tmp/tmp.i2y8gNZD02 +++ rm /tmp/tmp.M8gBumvG1G /tmp/tmp.i2y8gNZD02 +++ return 0 ++ for node_id in "${nodeList[@]}" ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-0 -- pmm-admin --server-url=https://admin:admin@$(get_pmm_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ grep /node_id/9a7b7267-4548-4b17-a155-f5f2c9037f7d ++++ get_pmm_service_ip monitoring-service +++ awk '{print $4}' ++++ local service=monitoring-service ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.spec.type}' ++++ grep -q NotFound ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[]}' ++++ grep -E -q 'hostname|ip' ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.YVdEuiRZcx +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.XHmKfk2BAx ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.YVdEuiRZcx ++++ cat /tmp/tmp.XHmKfk2BAx ++++ rm /tmp/tmp.YVdEuiRZcx /tmp/tmp.XHmKfk2BAx ++++ return 0 ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.CN38vzfzcv +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.NMBljLkW8f ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.CN38vzfzcv ++++ cat /tmp/tmp.NMBljLkW8f ++++ rm /tmp/tmp.CN38vzfzcv /tmp/tmp.NMBljLkW8f ++++ return 0 +++ kubectl_bin exec -n 
monitoring-2-0-24852 monitoring-0 -- pmm-admin --server-url=https://admin:admin@34.30.40.245/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.Uz1D78ypgL ++++ mktemp +++ local LAST_ERR=/tmp/tmp.C7YrdzN6EQ +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec -n monitoring-2-0-24852 monitoring-0 -- pmm-admin --server-url=https://admin:admin@34.30.40.245/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.Uz1D78ypgL +++ cat /tmp/tmp.C7YrdzN6EQ +++ rm /tmp/tmp.Uz1D78ypgL /tmp/tmp.C7YrdzN6EQ +++ return 0 ++ echo /node_id/28640ebe-d995-4112-a260-4cf68f407df7 /node_id/966badb4-70cc-49fb-a038-d82deec37007 /node_id/b823740a-addf-47d7-a8a4-c9996a727f4c /node_id/fe2f5228-5fff-406b-850d-5e628a437cf8 /node_id/45626b15-efd5-4b89-9f27-097301a33fc2 /node_id/ea395276-6c73-4aa9-88da-3371bcda54d2 /node_id/e6a28bfe-b2f9-4ff1-8d21-963d70b4d373 /node_id/7975fea8-f43c-4a74-b05a-a4a0b27d2283 /node_id/9a7b7267-4548-4b17-a155-f5f2c9037f7d + for node_id in "${nodeList_from_pmm[@]}" + '[' -z /node_id/28640ebe-d995-4112-a260-4cf68f407df7 ']' + for node_id in "${nodeList_from_pmm[@]}" + '[' -z /node_id/966badb4-70cc-49fb-a038-d82deec37007 ']' + for node_id in "${nodeList_from_pmm[@]}" + '[' -z /node_id/b823740a-addf-47d7-a8a4-c9996a727f4c ']' + for node_id in "${nodeList_from_pmm[@]}" + '[' -z /node_id/fe2f5228-5fff-406b-850d-5e628a437cf8 ']' + for node_id in "${nodeList_from_pmm[@]}" + '[' -z /node_id/45626b15-efd5-4b89-9f27-097301a33fc2 ']' + for node_id in "${nodeList_from_pmm[@]}" + '[' -z /node_id/ea395276-6c73-4aa9-88da-3371bcda54d2 ']' + for node_id in "${nodeList_from_pmm[@]}" + '[' -z /node_id/e6a28bfe-b2f9-4ff1-8d21-963d70b4d373 ']' + for node_id in "${nodeList_from_pmm[@]}" + '[' -z /node_id/7975fea8-f43c-4a74-b05a-a4a0b27d2283 ']' + for node_id in "${nodeList_from_pmm[@]}" + '[' -z /node_id/9a7b7267-4548-4b17-a155-f5f2c9037f7d ']' + kubectl_bin patch psmdb monitoring --type json '-p=[{"op":"add","path":"/spec/pause","value":true}]' ++ mktemp + local LAST_OUT=/tmp/tmp.A3KkE6ErmO ++ mktemp + local LAST_ERR=/tmp/tmp.6P3MKXWGhL + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl patch psmdb monitoring --type json '-p=[{"op":"add","path":"/spec/pause","value":true}]' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.A3KkE6ErmO perconaservermongodb.psmdb.percona.com/monitoring patched + cat /tmp/tmp.6P3MKXWGhL + rm /tmp/tmp.A3KkE6ErmO /tmp/tmp.6P3MKXWGhL + return 0 + wait_for_delete pod/monitoring-mongos-0 + local res=pod/monitoring-mongos-0 + local wait_time=60 + set +o xtrace waiting for pod/monitoring-mongos-0 to be deleted.........................Error from server (NotFound): pods "monitoring-mongos-0" not found Error from server (NotFound): pods "monitoring-mongos-0" not found Error from server (NotFound): pods "monitoring-mongos-0" not found Error from server (NotFound): pods "monitoring-mongos-0" not found + wait_for_delete pod/monitoring-rs0-0 + local res=pod/monitoring-rs0-0 + local wait_time=60 + set +o xtrace waiting for pod/monitoring-rs0-0 to be deleted..........Error from server (NotFound): pods "monitoring-rs0-0" not found Error from server (NotFound): pods "monitoring-rs0-0" not found Error from server (NotFound): pods "monitoring-rs0-0" not found Error from server (NotFound): pods "monitoring-rs0-0" not found + 
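
With every node_id confirmed non-empty by the `-z` checks, the test pauses the cluster and waits for its pods to disappear; the NotFound messages after each dotted line are just the poll continuing past the deletion. A minimal sketch of the pattern, assuming wait_for_delete's internals (only its name, resource argument and the 60s wait_time are visible in the trace):

    kubectl patch psmdb monitoring --type json \
        -p='[{"op":"add","path":"/spec/pause","value":true}]'

    wait_for_delete() {
        local res=$1 wait_time=${2:-60}
        echo -n "waiting for $res to be deleted"
        local i=0
        while kubectl get "$res" >/dev/null 2>&1; do
            echo -n .
            sleep 1
            i=$((i + 1))
            if [ "$i" -ge "$wait_time" ]; then
                return 1
            fi
        done
        echo
    }
    wait_for_delete pod/monitoring-mongos-0
    wait_for_delete pod/monitoring-rs0-0
    wait_for_delete pod/monitoring-cfg-0
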
wait_for_delete pod/monitoring-cfg-0 + local res=pod/monitoring-cfg-0 + local wait_time=60 + set +o xtrace waiting for pod/monitoring-cfg-0 to be deleted.......Error from server (NotFound): pods "monitoring-cfg-0" not found Error from server (NotFound): pods "monitoring-cfg-0" not found Error from server (NotFound): pods "monitoring-cfg-0" not found Error from server (NotFound): pods "monitoring-cfg-0" not found + desc 'check if services are not deleted' + set +o xtrace ----------------------------------------------------------------------------------- check if services are not deleted ----------------------------------------------------------------------------------- + kubectl_bin get svc monitoring-rs0 ++ mktemp + local LAST_OUT=/tmp/tmp.NqixtbkA6U ++ mktemp + local LAST_ERR=/tmp/tmp.fN9DkiXSnj + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get svc monitoring-rs0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.NqixtbkA6U NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE monitoring-rs0 ClusterIP None 27019/TCP 14m + cat /tmp/tmp.fN9DkiXSnj + rm /tmp/tmp.NqixtbkA6U /tmp/tmp.fN9DkiXSnj + return 0 + kubectl_bin get svc monitoring-cfg ++ mktemp + local LAST_OUT=/tmp/tmp.TabL42wjoa ++ mktemp + local LAST_ERR=/tmp/tmp.POsblAbYwt + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get svc monitoring-cfg + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.TabL42wjoa NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE monitoring-cfg ClusterIP None 27019/TCP 14m + cat /tmp/tmp.POsblAbYwt + rm /tmp/tmp.TabL42wjoa /tmp/tmp.POsblAbYwt + return 0 + kubectl_bin get svc monitoring-mongos ++ mktemp + local LAST_OUT=/tmp/tmp.PddjozqGT8 ++ mktemp + local LAST_ERR=/tmp/tmp.UOMG8IuZWh + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get svc monitoring-mongos + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.PddjozqGT8 NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE monitoring-mongos ClusterIP 34.118.233.120 27019/TCP 14m + cat /tmp/tmp.UOMG8IuZWh + rm /tmp/tmp.PddjozqGT8 /tmp/tmp.UOMG8IuZWh + return 0 + does_node_id_exists_in_pmm=($(does_node_id_exists "${nodeList[@]}")) ++ does_node_id_exists /node_id/28640ebe-d995-4112-a260-4cf68f407df7 /node_id/966badb4-70cc-49fb-a038-d82deec37007 /node_id/b823740a-addf-47d7-a8a4-c9996a727f4c /node_id/fe2f5228-5fff-406b-850d-5e628a437cf8 /node_id/45626b15-efd5-4b89-9f27-097301a33fc2 /node_id/ea395276-6c73-4aa9-88da-3371bcda54d2 /node_id/e6a28bfe-b2f9-4ff1-8d21-963d70b4d373 /node_id/7975fea8-f43c-4a74-b05a-a4a0b27d2283 /node_id/9a7b7267-4548-4b17-a155-f5f2c9037f7d ++ nodeList=('/node_id/28640ebe-d995-4112-a260-4cf68f407df7' '/node_id/966badb4-70cc-49fb-a038-d82deec37007' '/node_id/b823740a-addf-47d7-a8a4-c9996a727f4c' '/node_id/fe2f5228-5fff-406b-850d-5e628a437cf8' '/node_id/45626b15-efd5-4b89-9f27-097301a33fc2' '/node_id/ea395276-6c73-4aa9-88da-3371bcda54d2' '/node_id/e6a28bfe-b2f9-4ff1-8d21-963d70b4d373' '/node_id/7975fea8-f43c-4a74-b05a-a4a0b27d2283' '/node_id/9a7b7267-4548-4b17-a155-f5f2c9037f7d') ++ local -a nodeList ++ nodeList_from_pmm=() ++ local -a nodeList_from_pmm ++ for node_id in "${nodeList[@]}" ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-0 -- pmm-admin --server-url=https://admin:admin@$(get_pmm_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ grep 
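
The expansion that begins here is does_node_id_exists being re-run against the paused cluster, and its xtrace output exposes essentially the whole function body. Reconstructed from the trace (namespace is the test namespace, get_pmm_service_ip the helper sketched earlier):

    does_node_id_exists() {
        local -a nodeList=("$@")
        local -a nodeList_from_pmm=()
        for node_id in "${nodeList[@]}"; do
            # a node PMM no longer knows about contributes nothing here,
            # so the caller can detect removal with a -z test per entry
            nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-0 -- \
                pmm-admin --server-url="https://admin:admin@$(get_pmm_service_ip monitoring-service)/" \
                --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE \
                | grep "$node_id" | awk '{print $4}'))
        done
        echo "${nodeList_from_pmm[@]}"
    }
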
/node_id/28640ebe-d995-4112-a260-4cf68f407df7 +++ awk '{print $4}' ++++ get_pmm_service_ip monitoring-service ++++ local service=monitoring-service ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.spec.type}' ++++ grep -q NotFound ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[]}' ++++ grep -E -q 'hostname|ip' ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.XDxIGT6uFR +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.Ej5hPp8Rzg ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.XDxIGT6uFR ++++ cat /tmp/tmp.Ej5hPp8Rzg ++++ rm /tmp/tmp.XDxIGT6uFR /tmp/tmp.Ej5hPp8Rzg ++++ return 0 ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.ihxyFsMwO3 +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.CRhcQA0Atd ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.ihxyFsMwO3 ++++ cat /tmp/tmp.CRhcQA0Atd ++++ rm /tmp/tmp.ihxyFsMwO3 /tmp/tmp.CRhcQA0Atd ++++ return 0 +++ kubectl_bin exec -n monitoring-2-0-24852 monitoring-0 -- pmm-admin --server-url=https://admin:admin@34.30.40.245/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.nLCIzE8Hll ++++ mktemp +++ local LAST_ERR=/tmp/tmp.mwLrQP8MmU +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec -n monitoring-2-0-24852 monitoring-0 -- pmm-admin --server-url=https://admin:admin@34.30.40.245/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.nLCIzE8Hll +++ cat /tmp/tmp.mwLrQP8MmU +++ rm /tmp/tmp.nLCIzE8Hll /tmp/tmp.mwLrQP8MmU +++ return 0 ++ for node_id in "${nodeList[@]}" ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-0 -- pmm-admin --server-url=https://admin:admin@$(get_pmm_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ grep /node_id/966badb4-70cc-49fb-a038-d82deec37007 +++ awk '{print $4}' ++++ get_pmm_service_ip monitoring-service ++++ local service=monitoring-service ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.spec.type}' ++++ grep -q NotFound ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[]}' ++++ grep -E -q 'hostname|ip' ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.3bsxMcm6kz +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.yCydL2wEd9 ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.3bsxMcm6kz ++++ cat /tmp/tmp.yCydL2wEd9 ++++ rm /tmp/tmp.3bsxMcm6kz 
/tmp/tmp.yCydL2wEd9 ++++ return 0 ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.GeUuf9XQmo +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.t7gi2O62w3 ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.GeUuf9XQmo ++++ cat /tmp/tmp.t7gi2O62w3 ++++ rm /tmp/tmp.GeUuf9XQmo /tmp/tmp.t7gi2O62w3 ++++ return 0 +++ kubectl_bin exec -n monitoring-2-0-24852 monitoring-0 -- pmm-admin --server-url=https://admin:admin@34.30.40.245/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.wDgnfq4Gk7 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.1IQXikrONQ +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec -n monitoring-2-0-24852 monitoring-0 -- pmm-admin --server-url=https://admin:admin@34.30.40.245/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.wDgnfq4Gk7 +++ cat /tmp/tmp.1IQXikrONQ +++ rm /tmp/tmp.wDgnfq4Gk7 /tmp/tmp.1IQXikrONQ +++ return 0 ++ for node_id in "${nodeList[@]}" ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-0 -- pmm-admin --server-url=https://admin:admin@$(get_pmm_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ grep /node_id/b823740a-addf-47d7-a8a4-c9996a727f4c ++++ get_pmm_service_ip monitoring-service ++++ local service=monitoring-service +++ awk '{print $4}' ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.spec.type}' ++++ grep -q NotFound ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[]}' ++++ grep -E -q 'hostname|ip' ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.LXvgw5N0MC +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.ydI9LGO0Hd ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.LXvgw5N0MC ++++ cat /tmp/tmp.ydI9LGO0Hd ++++ rm /tmp/tmp.LXvgw5N0MC /tmp/tmp.ydI9LGO0Hd ++++ return 0 ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.kLsaUCKorn +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.mskdqigwr0 ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.kLsaUCKorn ++++ cat /tmp/tmp.mskdqigwr0 ++++ rm /tmp/tmp.kLsaUCKorn /tmp/tmp.mskdqigwr0 ++++ return 0 +++ kubectl_bin exec -n monitoring-2-0-24852 monitoring-0 -- pmm-admin --server-url=https://admin:admin@34.30.40.245/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.wzIhWmwptF ++++ mktemp +++ local LAST_ERR=/tmp/tmp.rKoKN63CSs +++ local 
exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec -n monitoring-2-0-24852 monitoring-0 -- pmm-admin --server-url=https://admin:admin@34.30.40.245/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.wzIhWmwptF +++ cat /tmp/tmp.rKoKN63CSs +++ rm /tmp/tmp.wzIhWmwptF /tmp/tmp.rKoKN63CSs +++ return 0 ++ for node_id in "${nodeList[@]}" ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-0 -- pmm-admin --server-url=https://admin:admin@$(get_pmm_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ awk '{print $4}' +++ grep /node_id/fe2f5228-5fff-406b-850d-5e628a437cf8 ++++ get_pmm_service_ip monitoring-service ++++ local service=monitoring-service ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.spec.type}' ++++ grep -q NotFound ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[]}' ++++ grep -E -q 'hostname|ip' ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.IlCbTJ8Lu6 +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.IkmJ6PSZKZ ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.IlCbTJ8Lu6 ++++ cat /tmp/tmp.IkmJ6PSZKZ ++++ rm /tmp/tmp.IlCbTJ8Lu6 /tmp/tmp.IkmJ6PSZKZ ++++ return 0 ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.tzskLPDzOr +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.z3bx6PUIQK ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.tzskLPDzOr ++++ cat /tmp/tmp.z3bx6PUIQK ++++ rm /tmp/tmp.tzskLPDzOr /tmp/tmp.z3bx6PUIQK ++++ return 0 +++ kubectl_bin exec -n monitoring-2-0-24852 monitoring-0 -- pmm-admin --server-url=https://admin:admin@34.30.40.245/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.QSmpn3O4hA ++++ mktemp +++ local LAST_ERR=/tmp/tmp.sRxLs7dy6Q +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec -n monitoring-2-0-24852 monitoring-0 -- pmm-admin --server-url=https://admin:admin@34.30.40.245/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.QSmpn3O4hA +++ cat /tmp/tmp.sRxLs7dy6Q +++ rm /tmp/tmp.QSmpn3O4hA /tmp/tmp.sRxLs7dy6Q +++ return 0 ++ for node_id in "${nodeList[@]}" ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-0 -- pmm-admin --server-url=https://admin:admin@$(get_pmm_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ grep /node_id/45626b15-efd5-4b89-9f27-097301a33fc2 +++ awk '{print $4}' ++++ get_pmm_service_ip monitoring-service ++++ local service=monitoring-service ++++ grep -q NotFound ++++ 
kubectl_bin get service/monitoring-service -o 'jsonpath={.spec.type}' ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[]}' ++++ grep -E -q 'hostname|ip' ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.2WrsaQbbaf +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.iKhappsg8J ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.2WrsaQbbaf ++++ cat /tmp/tmp.iKhappsg8J ++++ rm /tmp/tmp.2WrsaQbbaf /tmp/tmp.iKhappsg8J ++++ return 0 ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.KKIMoJtWqy +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.nKvnYD1rJ0 ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.KKIMoJtWqy ++++ cat /tmp/tmp.nKvnYD1rJ0 ++++ rm /tmp/tmp.KKIMoJtWqy /tmp/tmp.nKvnYD1rJ0 ++++ return 0 +++ kubectl_bin exec -n monitoring-2-0-24852 monitoring-0 -- pmm-admin --server-url=https://admin:admin@34.30.40.245/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.vJUeY0mTC0 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.wdlIrI7IeP +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec -n monitoring-2-0-24852 monitoring-0 -- pmm-admin --server-url=https://admin:admin@34.30.40.245/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.vJUeY0mTC0 +++ cat /tmp/tmp.wdlIrI7IeP +++ rm /tmp/tmp.vJUeY0mTC0 /tmp/tmp.wdlIrI7IeP +++ return 0 ++ for node_id in "${nodeList[@]}" ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-0 -- pmm-admin --server-url=https://admin:admin@$(get_pmm_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ grep /node_id/ea395276-6c73-4aa9-88da-3371bcda54d2 +++ awk '{print $4}' ++++ get_pmm_service_ip monitoring-service ++++ local service=monitoring-service ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.spec.type}' ++++ grep -q NotFound ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[]}' ++++ grep -E -q 'hostname|ip' ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.5SclbRI9od +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.mEEVTQvi5r ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.5SclbRI9od ++++ cat /tmp/tmp.mEEVTQvi5r ++++ rm /tmp/tmp.5SclbRI9od /tmp/tmp.mEEVTQvi5r ++++ return 0 ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++++ mktemp ++++ local 
LAST_OUT=/tmp/tmp.ZrpcICdMEn +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.foCg4cKhfY ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.ZrpcICdMEn ++++ cat /tmp/tmp.foCg4cKhfY ++++ rm /tmp/tmp.ZrpcICdMEn /tmp/tmp.foCg4cKhfY ++++ return 0 +++ kubectl_bin exec -n monitoring-2-0-24852 monitoring-0 -- pmm-admin --server-url=https://admin:admin@34.30.40.245/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.N1aboRi6WZ ++++ mktemp +++ local LAST_ERR=/tmp/tmp.Qr8vVj00Xd +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec -n monitoring-2-0-24852 monitoring-0 -- pmm-admin --server-url=https://admin:admin@34.30.40.245/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.N1aboRi6WZ +++ cat /tmp/tmp.Qr8vVj00Xd +++ rm /tmp/tmp.N1aboRi6WZ /tmp/tmp.Qr8vVj00Xd +++ return 0 ++ for node_id in "${nodeList[@]}" ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-0 -- pmm-admin --server-url=https://admin:admin@$(get_pmm_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ grep /node_id/e6a28bfe-b2f9-4ff1-8d21-963d70b4d373 ++++ get_pmm_service_ip monitoring-service +++ awk '{print $4}' ++++ local service=monitoring-service ++++ grep -q NotFound ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.spec.type}' ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[]}' ++++ grep -E -q 'hostname|ip' ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.KHb2ndzMrg +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.dGepxKp2Rp ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.KHb2ndzMrg ++++ cat /tmp/tmp.dGepxKp2Rp ++++ rm /tmp/tmp.KHb2ndzMrg /tmp/tmp.dGepxKp2Rp ++++ return 0 ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.X9xKAmX8h3 +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.vBfLFklOoI ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.X9xKAmX8h3 ++++ cat /tmp/tmp.vBfLFklOoI ++++ rm /tmp/tmp.X9xKAmX8h3 /tmp/tmp.vBfLFklOoI ++++ return 0 +++ kubectl_bin exec -n monitoring-2-0-24852 monitoring-0 -- pmm-admin --server-url=https://admin:admin@34.30.40.245/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.LjwsVBL3Ms ++++ mktemp +++ local LAST_ERR=/tmp/tmp.IlfF6vFXSs +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec -n monitoring-2-0-24852 monitoring-0 -- pmm-admin 
--server-url=https://admin:admin@34.30.40.245/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.LjwsVBL3Ms +++ cat /tmp/tmp.IlfF6vFXSs +++ rm /tmp/tmp.LjwsVBL3Ms /tmp/tmp.IlfF6vFXSs +++ return 0 ++ for node_id in "${nodeList[@]}" ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-0 -- pmm-admin --server-url=https://admin:admin@$(get_pmm_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ grep /node_id/7975fea8-f43c-4a74-b05a-a4a0b27d2283 +++ awk '{print $4}' ++++ get_pmm_service_ip monitoring-service ++++ local service=monitoring-service ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.spec.type}' ++++ grep -q NotFound ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[]}' ++++ grep -E -q 'hostname|ip' ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.zUzOebiRUa +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.TjbmboJC6H ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.zUzOebiRUa ++++ cat /tmp/tmp.TjbmboJC6H ++++ rm /tmp/tmp.zUzOebiRUa /tmp/tmp.TjbmboJC6H ++++ return 0 ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.ras3UGtUO9 +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.O8qqbfS18A ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.ras3UGtUO9 ++++ cat /tmp/tmp.O8qqbfS18A ++++ rm /tmp/tmp.ras3UGtUO9 /tmp/tmp.O8qqbfS18A ++++ return 0 +++ kubectl_bin exec -n monitoring-2-0-24852 monitoring-0 -- pmm-admin --server-url=https://admin:admin@34.30.40.245/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.S6MLrBxYai ++++ mktemp +++ local LAST_ERR=/tmp/tmp.R1CuhajlWG +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec -n monitoring-2-0-24852 monitoring-0 -- pmm-admin --server-url=https://admin:admin@34.30.40.245/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.S6MLrBxYai +++ cat /tmp/tmp.R1CuhajlWG +++ rm /tmp/tmp.S6MLrBxYai /tmp/tmp.R1CuhajlWG +++ return 0 ++ for node_id in "${nodeList[@]}" ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-0 -- pmm-admin --server-url=https://admin:admin@$(get_pmm_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ grep /node_id/9a7b7267-4548-4b17-a155-f5f2c9037f7d +++ awk '{print $4}' ++++ get_pmm_service_ip monitoring-service ++++ local service=monitoring-service ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.spec.type}' ++++ grep -q NotFound ++++ grep -E -q 'hostname|ip' ++++ kubectl_bin get service/monitoring-service 
-o 'jsonpath={.status.loadBalancer.ingress[]}' ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.qmSnDlp4pX +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.9FkFPrFvh9 ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.qmSnDlp4pX ++++ cat /tmp/tmp.9FkFPrFvh9 ++++ rm /tmp/tmp.qmSnDlp4pX /tmp/tmp.9FkFPrFvh9 ++++ return 0 ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.422N3G22O1 +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.E58uh3RYgn ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.422N3G22O1 ++++ cat /tmp/tmp.E58uh3RYgn ++++ rm /tmp/tmp.422N3G22O1 /tmp/tmp.E58uh3RYgn ++++ return 0 +++ kubectl_bin exec -n monitoring-2-0-24852 monitoring-0 -- pmm-admin --server-url=https://admin:admin@34.30.40.245/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.nNibsLDFoV ++++ mktemp +++ local LAST_ERR=/tmp/tmp.eZ1zOrcsRl +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec -n monitoring-2-0-24852 monitoring-0 -- pmm-admin --server-url=https://admin:admin@34.30.40.245/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.nNibsLDFoV +++ cat /tmp/tmp.eZ1zOrcsRl +++ rm /tmp/tmp.nNibsLDFoV /tmp/tmp.eZ1zOrcsRl +++ return 0 ++ echo + desc 'check customClusterName for pmm' + set +o xtrace ----------------------------------------------------------------------------------- check customClusterName for pmm ----------------------------------------------------------------------------------- + custom_name=custom-cluster-name + kubectl_bin patch psmdb monitoring --type json '-p=[{"op":"add","path":"/spec/pause","value":false}, {"op":"add","path":"/spec/pmm/customClusterName","value":custom-cluster-name}]' ++ mktemp + local LAST_OUT=/tmp/tmp.Dlp0EKMbOP ++ mktemp + local LAST_ERR=/tmp/tmp.4SAbrR52dW + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl patch psmdb monitoring --type json '-p=[{"op":"add","path":"/spec/pause","value":false}, {"op":"add","path":"/spec/pmm/customClusterName","value":custom-cluster-name}]' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Dlp0EKMbOP perconaservermongodb.psmdb.percona.com/monitoring patched + cat /tmp/tmp.4SAbrR52dW + rm /tmp/tmp.Dlp0EKMbOP /tmp/tmp.4SAbrR52dW + return 0 + wait_for_running monitoring-rs0 3 + local name=monitoring-rs0 + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=monitoring ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod monitoring-rs0-0 + local pod=monitoring-rs0-0 + set +o xtrace waiting for pod/monitoring-rs0-0 to be ready..........OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod monitoring-rs0-1 + local pod=monitoring-rs0-1 + set 
+o xtrace waiting for pod/monitoring-rs0-1 to be ready...........OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb monitoring -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.hhvxC4f5xU +++ mktemp ++ local LAST_ERR=/tmp/tmp.CxtH8zOLrM ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb monitoring -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.hhvxC4f5xU ++ cat /tmp/tmp.CxtH8zOLrM ++ rm /tmp/tmp.hhvxC4f5xU /tmp/tmp.CxtH8zOLrM ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod monitoring-rs0-2 + local pod=monitoring-rs0-2 + set +o xtrace waiting for pod/monitoring-rs0-2 to be ready............OK ++ kubectl_bin get psmdb monitoring -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.kUKFg0xNiV +++ mktemp ++ local LAST_ERR=/tmp/tmp.0cIO9Cg0WK ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb monitoring -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.kUKFg0xNiV ++ cat /tmp/tmp.0cIO9Cg0WK ++ rm /tmp/tmp.kUKFg0xNiV /tmp/tmp.0cIO9Cg0WK ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb monitoring -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.EYhfnd4AcY +++ mktemp ++ local LAST_ERR=/tmp/tmp.Znvw0nckTM ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb monitoring -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.EYhfnd4AcY ++ cat /tmp/tmp.Znvw0nckTM ++ rm /tmp/tmp.EYhfnd4AcY /tmp/tmp.Znvw0nckTM ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness......... 
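
Once the unpaused cluster reports ready, the test resolves the external endpoint, pulls PMM's MONGODB_SERVICE inventory over the REST API, and asserts that every service carries the cluster label set through spec.pmm.customClusterName. The jq filter and the comparison below follow the trace; hoisting the service name into a jq --arg is a small idiomatic tweak, and $tmp_dir stands in for the /tmp/tmp.FZuz5QQQV1 directory this run happened to use:

    EP=$(get_service_endpoint monitoring-service)    # resolves to 34.30.40.245 here
    curl -s -k -d '{"service_type":"MONGODB_SERVICE"}' \
        "https://admin:admin@${EP}/v1/inventory/Services/List" >"$tmp_dir/pmm_service_list.json"

    check_custom_cluster_name() {
        local pod_service_name=$1 pmm_services_file=$2
        echo "Checking $pod_service_name"
        local pmm_service_cluster
        pmm_service_cluster=$(jq -r --arg n "$pod_service_name" \
            '.mongodb[] | select(.service_name==$n) | .cluster' "$pmm_services_file")
        if [[ $pmm_service_cluster != custom-cluster-name ]]; then
            echo "expected cluster custom-cluster-name, got $pmm_service_cluster"
            return 1
        fi
    }

    check_custom_cluster_name monitoring-2-0-24852-monitoring-mongos-0 "$tmp_dir/pmm_service_list.json"
    check_custom_cluster_name monitoring-2-0-24852-monitoring-rs0-0 "$tmp_dir/pmm_service_list.json"
    check_custom_cluster_name monitoring-2-0-24852-monitoring-cfg-0 "$tmp_dir/pmm_service_list.json"

All three checks pass in this run: pmm_service_cluster comes back as custom-cluster-name for the mongos, rs0 and cfg services.
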
++ get_service_endpoint monitoring-service ++ local service=monitoring-service +++ jq '.status.loadBalancer.ingress[].hostname' +++ kubectl_bin get service/monitoring-service -o json +++ sed -e 's/^"//; s/"$//;' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.UVHdwQARLu ++++ mktemp +++ local LAST_ERR=/tmp/tmp.5aFHie5jcw +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.UVHdwQARLu +++ cat /tmp/tmp.5aFHie5jcw +++ rm /tmp/tmp.UVHdwQARLu /tmp/tmp.5aFHie5jcw +++ return 0 ++ local hostname=null ++ '[' -n null -a null '!=' null ']' +++ kubectl_bin get service/monitoring-service -o json +++ jq '.status.loadBalancer.ingress[].ip' +++ sed -e 's/^"//; s/"$//;' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.F8CdeFPd5v ++++ mktemp +++ local LAST_ERR=/tmp/tmp.ywPibXi7oz +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.F8CdeFPd5v +++ cat /tmp/tmp.ywPibXi7oz +++ rm /tmp/tmp.F8CdeFPd5v /tmp/tmp.ywPibXi7oz +++ return 0 ++ local ip=34.30.40.245 ++ '[' -n 34.30.40.245 -a 34.30.40.245 '!=' null ']' ++ echo 34.30.40.245 ++ return + curl -s -k -d '{"service_type":"MONGODB_SERVICE"}' https://admin:admin@34.30.40.245/v1/inventory/Services/List + check_custom_cluster_name monitoring-2-0-24852-monitoring-mongos-0 /tmp/tmp.FZuz5QQQV1/pmm_service_list.json + local pod_service_name=monitoring-2-0-24852-monitoring-mongos-0 + local pmm_services_file=/tmp/tmp.FZuz5QQQV1/pmm_service_list.json + echo 'Checking monitoring-2-0-24852-monitoring-mongos-0' Checking monitoring-2-0-24852-monitoring-mongos-0 ++ jq -r '.mongodb[] | select(.service_name=="monitoring-2-0-24852-monitoring-mongos-0") | .cluster' /tmp/tmp.FZuz5QQQV1/pmm_service_list.json + pmm_service_cluster=custom-cluster-name + [[ custom-cluster-name != custom-cluster-name ]] + check_custom_cluster_name monitoring-2-0-24852-monitoring-rs0-0 /tmp/tmp.FZuz5QQQV1/pmm_service_list.json + local pod_service_name=monitoring-2-0-24852-monitoring-rs0-0 + local pmm_services_file=/tmp/tmp.FZuz5QQQV1/pmm_service_list.json + echo 'Checking monitoring-2-0-24852-monitoring-rs0-0' Checking monitoring-2-0-24852-monitoring-rs0-0 ++ jq -r '.mongodb[] | select(.service_name=="monitoring-2-0-24852-monitoring-rs0-0") | .cluster' /tmp/tmp.FZuz5QQQV1/pmm_service_list.json + pmm_service_cluster=custom-cluster-name + [[ custom-cluster-name != custom-cluster-name ]] + check_custom_cluster_name monitoring-2-0-24852-monitoring-cfg-0 /tmp/tmp.FZuz5QQQV1/pmm_service_list.json + local pod_service_name=monitoring-2-0-24852-monitoring-cfg-0 + local pmm_services_file=/tmp/tmp.FZuz5QQQV1/pmm_service_list.json + echo 'Checking monitoring-2-0-24852-monitoring-cfg-0' Checking monitoring-2-0-24852-monitoring-cfg-0 ++ jq -r '.mongodb[] | select(.service_name=="monitoring-2-0-24852-monitoring-cfg-0") | .cluster' /tmp/tmp.FZuz5QQQV1/pmm_service_list.json + pmm_service_cluster=custom-cluster-name + [[ custom-cluster-name != custom-cluster-name ]] + [[ -n '' ]] ++ kubectl_bin logs monitoring-rs0-0 pmm-client ++ grep -c 'cannot auto discover databases and collections' +++ mktemp ++ local LAST_OUT=/tmp/tmp.0mZjoMH4Lf +++ mktemp ++ local LAST_ERR=/tmp/tmp.MENY8AMe0L ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl logs 
monitoring-rs0-0 pmm-client ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.0mZjoMH4Lf ++ cat /tmp/tmp.MENY8AMe0L ++ rm /tmp/tmp.0mZjoMH4Lf /tmp/tmp.MENY8AMe0L ++ return 0 + [[ 0 != 0 ]] + helm uninstall monitoring release "monitoring" uninstalled + destroy monitoring-2-0-24852 + local namespace=monitoring-2-0-24852 + local ignore_logs=true + [[ 0 == 1 ]] + desc 'destroy cluster/operator and all other resources' + set +o xtrace ----------------------------------------------------------------------------------- destroy cluster/operator and all other resources ----------------------------------------------------------------------------------- + '[' true == false ']' + delete_backups + desc 'Delete psmdb-backup' + set +o xtrace ----------------------------------------------------------------------------------- Delete psmdb-backup ----------------------------------------------------------------------------------- ++ kubectl_bin get psmdb-backup --no-headers ++ wc -l +++ mktemp ++ local LAST_OUT=/tmp/tmp.MZ215Q6XDP +++ mktemp ++ local LAST_ERR=/tmp/tmp.vPR5wMHva5 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb-backup --no-headers ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.MZ215Q6XDP ++ cat /tmp/tmp.vPR5wMHva5 No resources found in monitoring-2-0-24852 namespace. ++ rm /tmp/tmp.MZ215Q6XDP /tmp/tmp.vPR5wMHva5 ++ return 0 + '[' 0 '!=' 0 ']' + delete_crd + desc 'get and delete old CRDs and RBAC' + set +o xtrace ----------------------------------------------------------------------------------- get and delete old CRDs and RBAC ----------------------------------------------------------------------------------- + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2272/deploy/crd.yaml --ignore-not-found --wait=false ++ mktemp + local LAST_OUT=/tmp/tmp.luCPSFONDe ++ mktemp + local LAST_ERR=/tmp/tmp.byhOD4wENm + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2272/deploy/crd.yaml --ignore-not-found --wait=false + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.luCPSFONDe customresourcedefinition.apiextensions.k8s.io "perconaservermongodbbackups.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbrestores.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbs.psmdb.percona.com" deleted + cat /tmp/tmp.byhOD4wENm + rm /tmp/tmp.luCPSFONDe /tmp/tmp.byhOD4wENm + return 0 ++ yq eval .metadata.name /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2272/deploy/crd.yaml ++ grep -v '\-\-\-' grep: warning: stray \ before - grep: warning: stray \ before - + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + kubectl get perconaservermongodbbackups.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbbackups.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbbackups" + kubectl patch perconaservermongodbbackups.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbbackups" + : + kubectl_bin wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com ++ mktemp + local 
LAST_OUT=/tmp/tmp.MMYjWdn4Xk ++ mktemp + local LAST_ERR=/tmp/tmp.ov54efDYAj + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.MMYjWdn4Xk + cat /tmp/tmp.ov54efDYAj + rm /tmp/tmp.MMYjWdn4Xk /tmp/tmp.ov54efDYAj + return 0 + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + grep -v NAMESPACE + kubectl get perconaservermongodbrestores.psmdb.percona.com --all-namespaces -o wide + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbrestores.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbrestores" + kubectl patch perconaservermongodbrestores.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbrestores" + : + kubectl_bin wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.itrByalsTX ++ mktemp + local LAST_ERR=/tmp/tmp.AMxAmDRnm7 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.itrByalsTX + cat /tmp/tmp.AMxAmDRnm7 + rm /tmp/tmp.itrByalsTX /tmp/tmp.AMxAmDRnm7 + return 0 + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + grep -v NAMESPACE + kubectl get perconaservermongodbs.psmdb.percona.com --all-namespaces -o wide + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbs.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' No resources found + kubectl patch perconaservermongodbs.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbs" + : + kubectl_bin wait --for=delete crd perconaservermongodbs.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.CT8dPS6XaE ++ mktemp + local LAST_ERR=/tmp/tmp.lIEeFHNdDJ + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbs.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.CT8dPS6XaE + cat /tmp/tmp.lIEeFHNdDJ + rm /tmp/tmp.CT8dPS6XaE /tmp/tmp.lIEeFHNdDJ + return 0 + local rbac_yaml=rbac.yaml + '[' -n psmdb-operator ']' + rbac_yaml=cw-rbac.yaml + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2272/deploy/cw-rbac.yaml --ignore-not-found ++ mktemp + local LAST_OUT=/tmp/tmp.bXc364ZewZ ++ mktemp + local LAST_ERR=/tmp/tmp.ts9FFX45wP + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2272/deploy/cw-rbac.yaml --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.bXc364ZewZ clusterrole.rbac.authorization.k8s.io "percona-server-mongodb-operator" deleted clusterrolebinding.rbac.authorization.k8s.io "service-account-percona-server-mongodb-operator" deleted + cat /tmp/tmp.ts9FFX45wP + rm /tmp/tmp.bXc364ZewZ /tmp/tmp.ts9FFX45wP + return 0 + destroy_cert_manager + kubectl_bin delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml ++ mktemp + local 
LAST_OUT=/tmp/tmp.ecU0YmfAYi ++ mktemp + local LAST_ERR=/tmp/tmp.Qe85t5rR87 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.ecU0YmfAYi namespace "cert-manager" deleted customresourcedefinition.apiextensions.k8s.io "challenges.acme.cert-manager.io" deleted customresourcedefinition.apiextensions.k8s.io "orders.acme.cert-manager.io" deleted customresourcedefinition.apiextensions.k8s.io "certificaterequests.cert-manager.io" deleted customresourcedefinition.apiextensions.k8s.io "certificates.cert-manager.io" deleted customresourcedefinition.apiextensions.k8s.io "clusterissuers.cert-manager.io" deleted customresourcedefinition.apiextensions.k8s.io "issuers.cert-manager.io" deleted serviceaccount "cert-manager-cainjector" deleted from cert-manager namespace serviceaccount "cert-manager" deleted from cert-manager namespace serviceaccount "cert-manager-webhook" deleted from cert-manager namespace clusterrole.rbac.authorization.k8s.io "cert-manager-cainjector" deleted clusterrole.rbac.authorization.k8s.io "cert-manager-controller-issuers" deleted clusterrole.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" deleted clusterrole.rbac.authorization.k8s.io "cert-manager-controller-certificates" deleted clusterrole.rbac.authorization.k8s.io "cert-manager-controller-orders" deleted clusterrole.rbac.authorization.k8s.io "cert-manager-controller-challenges" deleted clusterrole.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" deleted clusterrole.rbac.authorization.k8s.io "cert-manager-cluster-view" deleted clusterrole.rbac.authorization.k8s.io "cert-manager-view" deleted clusterrole.rbac.authorization.k8s.io "cert-manager-edit" deleted clusterrole.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" deleted clusterrole.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" deleted clusterrole.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" deleted clusterrolebinding.rbac.authorization.k8s.io "cert-manager-cainjector" deleted clusterrolebinding.rbac.authorization.k8s.io "cert-manager-controller-issuers" deleted clusterrolebinding.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" deleted clusterrolebinding.rbac.authorization.k8s.io "cert-manager-controller-certificates" deleted clusterrolebinding.rbac.authorization.k8s.io "cert-manager-controller-orders" deleted clusterrolebinding.rbac.authorization.k8s.io "cert-manager-controller-challenges" deleted clusterrolebinding.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" deleted clusterrolebinding.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" deleted clusterrolebinding.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" deleted clusterrolebinding.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" deleted role.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" deleted from kube-system namespace role.rbac.authorization.k8s.io "cert-manager:leaderelection" deleted from kube-system namespace role.rbac.authorization.k8s.io "cert-manager-tokenrequest" deleted from cert-manager namespace role.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" deleted from cert-manager namespace rolebinding.rbac.authorization.k8s.io 
"cert-manager-cainjector:leaderelection" deleted from kube-system namespace rolebinding.rbac.authorization.k8s.io "cert-manager:leaderelection" deleted from kube-system namespace service "cert-manager-cainjector" deleted from cert-manager namespace deployment.apps "cert-manager-cainjector" deleted from cert-manager namespace deployment.apps "cert-manager" deleted from cert-manager namespace deployment.apps "cert-manager-webhook" deleted from cert-manager namespace mutatingwebhookconfiguration.admissionregistration.k8s.io "cert-manager-webhook" deleted validatingwebhookconfiguration.admissionregistration.k8s.io "cert-manager-webhook" deleted + cat /tmp/tmp.Qe85t5rR87 Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": services "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": services "cert-manager-webhook" not found + sleep 0 + for i in $(seq 0 2) + set +e + kubectl delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.ecU0YmfAYi namespace "cert-manager" deleted + cat /tmp/tmp.Qe85t5rR87 Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "challenges.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "orders.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificaterequests.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificates.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "clusterissuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "issuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": serviceaccounts "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": serviceaccounts "cert-manager" not found Error from server 
+ for i in $(seq 0 2)
+ set +e
+ kubectl delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml
+ exit_status=1
+ set -e
+ '[' 1 '!=' 0 -a -n 1 ']'
+ cat /tmp/tmp.ecU0YmfAYi
namespace "cert-manager" deleted
+ cat /tmp/tmp.Qe85t5rR87
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "challenges.acme.cert-manager.io" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "orders.acme.cert-manager.io" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificaterequests.cert-manager.io" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificates.cert-manager.io" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "clusterissuers.cert-manager.io" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "issuers.cert-manager.io" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": serviceaccounts "cert-manager-cainjector" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": serviceaccounts "cert-manager" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": serviceaccounts "cert-manager-webhook" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cainjector" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-orders" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cluster-view" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-view" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-edit" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-cainjector" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-orders" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager:leaderelection" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager:leaderelection" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": services "cert-manager-cainjector" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": services "cert-manager" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": services "cert-manager-webhook" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": deployments.apps "cert-manager-cainjector" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": deployments.apps "cert-manager" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": deployments.apps "cert-manager-webhook" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": mutatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": validatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found
+ sleep 4
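The delete calls above are driven by the harness's kubectl_bin retry wrapper. The following is a minimal sketch of that wrapper, reconstructed only from what the trace shows (mktemp output buffers, three attempts via `seq 0 2`, and sleeps of 0s/4s/8s between attempts); the sleep arithmetic and the success branch are assumptions inferred from the log, not the harness source:

    # Sketch, assuming the behaviour visible in the trace above.
    kubectl_bin() {
        local LAST_OUT LAST_ERR exit_status=0 timeout=4
        LAST_OUT=$(mktemp)
        LAST_ERR=$(mktemp)
        for i in $(seq 0 2); do
            set +e
            kubectl "$@" >"$LAST_OUT" 2>"$LAST_ERR"
            exit_status=$?
            set -e
            # Surface whatever kubectl produced on this attempt.
            cat "$LAST_OUT"
            cat "$LAST_ERR"
            if [ "$exit_status" -eq 0 ]; then
                rm "$LAST_OUT" "$LAST_ERR"
                return 0
            fi
            sleep $((timeout * i))   # observed in the trace: sleep 0, sleep 4, sleep 8
        done
        # Retries exhausted: dump both buffers once more and report failure.
        cat "$LAST_OUT"
        cat "$LAST_ERR"
        rm "$LAST_OUT" "$LAST_ERR"
        return 1
    }

The traced test `'[' 1 '!=' 0 -a -n 1 ']'` is this exit-status check after expansion; note that the `-a` operator of `[` is marked obsolescent by POSIX, so two chained `[ ... ] && [ ... ]` tests (or bash's `[[ ... && ... ]]`) would be the more portable spelling.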
+ for i in $(seq 0 2)
+ set +e
+ kubectl delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml
+ exit_status=1
+ set -e
+ '[' 1 '!=' 0 -a -n 1 ']'
+ cat /tmp/tmp.ecU0YmfAYi
+ cat /tmp/tmp.Qe85t5rR87
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": namespaces "cert-manager" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "challenges.acme.cert-manager.io" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "orders.acme.cert-manager.io" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificaterequests.cert-manager.io" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificates.cert-manager.io" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "clusterissuers.cert-manager.io" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "issuers.cert-manager.io" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": serviceaccounts "cert-manager-cainjector" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": serviceaccounts "cert-manager" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": serviceaccounts "cert-manager-webhook" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cainjector" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-orders" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cluster-view" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-view" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-edit" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-cainjector" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-orders" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager:leaderelection" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager:leaderelection" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": services "cert-manager-cainjector" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": services "cert-manager" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": services "cert-manager-webhook" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": deployments.apps "cert-manager-cainjector" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": deployments.apps "cert-manager" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": deployments.apps "cert-manager-webhook" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": mutatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": validatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found
+ sleep 8
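Each retry re-applies the same manifest URL, so everything removed on the first pass surfaces as a NotFound error on later passes and the wrapper keeps failing until its retries are exhausted. Where a teardown only needs to be idempotent, kubectl's --ignore-not-found flag suppresses exactly this class of error; a sketch against the same manifest (standard kubectl delete behaviour, not something this harness does at this point in the log):

    # Hypothetical idempotent variant: absent resources are skipped
    # silently and the command exits 0, so no retry loop is needed.
    kubectl delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml \
        --ignore-not-found=true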
+ cat /tmp/tmp.ecU0YmfAYi
+ cat /tmp/tmp.Qe85t5rR87
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": namespaces "cert-manager" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "challenges.acme.cert-manager.io" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "orders.acme.cert-manager.io" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificaterequests.cert-manager.io" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificates.cert-manager.io" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "clusterissuers.cert-manager.io" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "issuers.cert-manager.io" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": serviceaccounts "cert-manager-cainjector" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": serviceaccounts "cert-manager" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": serviceaccounts "cert-manager-webhook" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cainjector" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-orders" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cluster-view" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-view" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-edit" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-cainjector" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-orders" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager:leaderelection" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager:leaderelection" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": services "cert-manager-cainjector" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": services "cert-manager" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": services "cert-manager-webhook" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": deployments.apps "cert-manager-cainjector" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": deployments.apps "cert-manager" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": deployments.apps "cert-manager-webhook" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": mutatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": validatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found
+ rm /tmp/tmp.ecU0YmfAYi /tmp/tmp.Qe85t5rR87
+ return 1
+ true
+ '[' -n '' ']'
+ '[' -n psmdb-operator ']'
+ kubectl_bin delete --grace-period=0 --force=true namespace monitoring-2-0-24852
+ rm -rf /tmp/tmp.FZuz5QQQV1
+ kubectl_bin delete --grace-period=0 --force=true namespace psmdb-operator
++ mktemp
++ mktemp
+ local LAST_OUT=/tmp/tmp.3NuH2mOQs8
++ mktemp
+ local LAST_OUT=/tmp/tmp.fnm4TH6a9K
+ local LAST_ERR=/tmp/tmp.wxWGn39GIx
+ desc 'test passed'
+ local exit_status=0
+ set +o xtrace
+ local timeout=4
-----------------------------------------------------------------------------------
test passed
-----------------------------------------------------------------------------------
++ mktemp
++ seq 0 2
+ local LAST_ERR=/tmp/tmp.P9jeMZofRw
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ kubectl delete --grace-period=0 --force=true namespace psmdb-operator
+ for i in $(seq 0 2)
+ set +e
+ kubectl delete --grace-period=0 --force=true namespace monitoring-2-0-24852
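The `+ return 1` followed by `+ true` shows the caller deliberately tolerating the failed cert-manager deletion, and the interleaved `mktemp`/`local` lines at the end come from two kubectl_bin invocations tracing at once, which suggests the test and operator namespaces are torn down as concurrent background jobs. A minimal sketch of that final cleanup under those assumptions (the backgrounding and `wait` are inferred from the interleaving, not confirmed by the trace; the flags match the log):

    # Force-delete both namespaces in parallel. --grace-period=0 --force
    # skips graceful pod termination, which is acceptable for CI cleanup
    # but not for namespaces whose workloads must shut down cleanly.
    kubectl delete --grace-period=0 --force=true namespace monitoring-2-0-24852 &
    kubectl delete --grace-period=0 --force=true namespace psmdb-operator &
    wait   # block until both background deletions have finished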