Log: /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2283/e2e-tests/logs/monitoring-2-0.log Warning: version difference between client (1.35) and server (1.32) exceeds the supported minor version skew of +/-1 Warning: version difference between client (1.35) and server (1.32) exceeds the supported minor version skew of +/-1 Warning: version difference between client (1.35) and server (1.32) exceeds the supported minor version skew of +/-1 + create_infra monitoring-2-0-24245 + local ns=monitoring-2-0-24245 + [[ 1 == 1 ]] + delete_crd + desc 'get and delete old CRDs and RBAC' + set +o xtrace ----------------------------------------------------------------------------------- get and delete old CRDs and RBAC ----------------------------------------------------------------------------------- + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2283/deploy/crd.yaml --ignore-not-found --wait=false ++ mktemp + local LAST_OUT=/tmp/tmp.a24FUOJV4V ++ mktemp + local LAST_ERR=/tmp/tmp.pwkq2fvznE + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2283/deploy/crd.yaml --ignore-not-found --wait=false + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.a24FUOJV4V customresourcedefinition.apiextensions.k8s.io "perconaservermongodbbackups.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbrestores.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbs.psmdb.percona.com" deleted + cat /tmp/tmp.pwkq2fvznE + rm /tmp/tmp.a24FUOJV4V /tmp/tmp.pwkq2fvznE + return 0 ++ grep -v '\-\-\-' ++ yq eval .metadata.name /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2283/deploy/crd.yaml grep: warning: stray \ before - grep: warning: stray \ before - + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + kubectl get perconaservermongodbbackups.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbbackups.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbbackups" + kubectl patch perconaservermongodbbackups.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbbackups" + : + kubectl_bin wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.JxPJZXB7IN ++ mktemp + local LAST_ERR=/tmp/tmp.5zUQKyaJ5P + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.JxPJZXB7IN + cat /tmp/tmp.5zUQKyaJ5P + rm /tmp/tmp.JxPJZXB7IN /tmp/tmp.5zUQKyaJ5P + return 0 + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + kubectl get perconaservermongodbrestores.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbrestores.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbrestores" + kubectl patch perconaservermongodbrestores.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't 
have a resource type "perconaservermongodbrestores" + : + kubectl_bin wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.WWlT2xrFb2 ++ mktemp + local LAST_ERR=/tmp/tmp.lnx9AreuMk + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.WWlT2xrFb2 + cat /tmp/tmp.lnx9AreuMk + rm /tmp/tmp.WWlT2xrFb2 /tmp/tmp.lnx9AreuMk + return 0 + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + kubectl get perconaservermongodbs.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbs.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbs" + kubectl patch perconaservermongodbs.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbs" + : + kubectl_bin wait --for=delete crd perconaservermongodbs.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.0KlTTSr09z ++ mktemp + local LAST_ERR=/tmp/tmp.wXCp8HQ7hU + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbs.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.0KlTTSr09z + cat /tmp/tmp.wXCp8HQ7hU + rm /tmp/tmp.0KlTTSr09z /tmp/tmp.wXCp8HQ7hU + return 0 + local rbac_yaml=rbac.yaml + '[' -n psmdb-operator ']' + rbac_yaml=cw-rbac.yaml + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2283/deploy/cw-rbac.yaml --ignore-not-found ++ mktemp + local LAST_OUT=/tmp/tmp.EbUTHj7GIg ++ mktemp + local LAST_ERR=/tmp/tmp.L3piztTXAY + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2283/deploy/cw-rbac.yaml --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.EbUTHj7GIg clusterrole.rbac.authorization.k8s.io "percona-server-mongodb-operator" deleted clusterrolebinding.rbac.authorization.k8s.io "service-account-percona-server-mongodb-operator" deleted + cat /tmp/tmp.L3piztTXAY + rm /tmp/tmp.EbUTHj7GIg /tmp/tmp.L3piztTXAY + return 0 + check_crd_for_deletion PR-2283-e32dd8b3 + local git_tag=PR-2283-e32dd8b3 ++ curl -s https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/PR-2283-e32dd8b3/deploy/crd.yaml ++ /usr/sbin/sed s/---//g ++ yq eval .metadata.name ++ /usr/sbin/sed ':a;N;$!ba;s/\n/ /g' + for crd_name in $(curl -s https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/${git_tag}/deploy/crd.yaml | yq eval '.metadata.name' | $sed 's/---//g' | $sed ':a;N;$!ba;s/\n/ /g') ++ kubectl_bin get crd/null -o 'jsonpath={.status.conditions[-1].type}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.RcHbHF3WgN +++ mktemp ++ local LAST_ERR=/tmp/tmp.b1PqcWYlgc ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}' ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.RcHbHF3WgN ++ cat /tmp/tmp.b1PqcWYlgc Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ sleep 0 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get 
crd/null -o 'jsonpath={.status.conditions[-1].type}' ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.RcHbHF3WgN ++ cat /tmp/tmp.b1PqcWYlgc Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ sleep 4 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}' ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.RcHbHF3WgN ++ cat /tmp/tmp.b1PqcWYlgc Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ sleep 8 ++ cat /tmp/tmp.RcHbHF3WgN ++ cat /tmp/tmp.b1PqcWYlgc Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ rm /tmp/tmp.RcHbHF3WgN /tmp/tmp.b1PqcWYlgc ++ return 1 + [[ '' == \T\e\r\m\i\n\a\t\i\n\g ]] + '[' -n psmdb-operator ']' + create_namespace psmdb-operator + local namespace=psmdb-operator + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + destroy_chaos_mesh ++ awk '-F ' '{print $2}' ++ helm list --all-namespaces --filter chaos-mesh ++ tail -n1 ++ sed s/NAMESPACE// + local chaos_mesh_ns= + desc 'destroy chaos-mesh' + set +o xtrace ----------------------------------------------------------------------------------- destroy chaos-mesh ----------------------------------------------------------------------------------- + '[' -n '' ']' ++ kubectl get MutatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete MutatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep validate-auth ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl api-resources ++ grep chaos-mesh ++ awk '{print $1}' ++ kubectl get crd ++ awk '{print $1}' ++ grep chaos-mesh.org + timeout 30 kubectl delete crd error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrolebinding ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrolebinding error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrole ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrole error: resource(s) were provided, but no name was specified + : + desc 'cleaned up all old namespaces' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up all old namespaces ----------------------------------------------------------------------------------- + kubectl_bin get ns + grep -E -v '^kube-|^default|Terminating|psmdb-operator|openshift|^gke-|^gmp-|^NAME' + '[' -n '' ']' + desc 'cleaned up old namespaces psmdb-operator' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up old namespaces psmdb-operator ----------------------------------------------------------------------------------- + kubectl_bin delete namespace psmdb-operator --ignore-not-found + xargs kubectl delete ns + awk '{print$1}' ++ mktemp ++ mktemp + local LAST_OUT=/tmp/tmp.P0QvRfMcLT ++ mktemp + local LAST_OUT=/tmp/tmp.34F3EIoqkw + local LAST_ERR=/tmp/tmp.OE9LgOK0T8 + local exit_status=0 + local timeout=4 ++ mktemp ++ seq 0 2 + local 
LAST_ERR=/tmp/tmp.Hb6GBz04th + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete namespace psmdb-operator --ignore-not-found + for i in $(seq 0 2) + set +e + kubectl get ns + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.P0QvRfMcLT + cat /tmp/tmp.OE9LgOK0T8 + rm /tmp/tmp.P0QvRfMcLT /tmp/tmp.OE9LgOK0T8 + return 0 namespace "cert-manager" deleted namespace "monitoring-2-0-8704" deleted + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.34F3EIoqkw namespace "psmdb-operator" deleted + cat /tmp/tmp.Hb6GBz04th + rm /tmp/tmp.34F3EIoqkw /tmp/tmp.Hb6GBz04th + return 0 + kubectl_bin wait --for=delete namespace psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.Ppwhv2jBbx ++ mktemp + local LAST_ERR=/tmp/tmp.KnNt0uNprZ + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete namespace psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Ppwhv2jBbx + cat /tmp/tmp.KnNt0uNprZ + rm /tmp/tmp.Ppwhv2jBbx /tmp/tmp.KnNt0uNprZ + return 0 + desc 'create namespace psmdb-operator' + set +o xtrace ----------------------------------------------------------------------------------- create namespace psmdb-operator ----------------------------------------------------------------------------------- + kubectl_bin create namespace psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.tJ3GzofUuO ++ mktemp + local LAST_ERR=/tmp/tmp.vNS6N8ASKo + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl create namespace psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.tJ3GzofUuO namespace/psmdb-operator created + cat /tmp/tmp.vNS6N8ASKo + rm /tmp/tmp.tJ3GzofUuO /tmp/tmp.vNS6N8ASKo + return 0 + set_kube_ctx psmdb-operator + local namespace=psmdb-operator ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.Og8dd6TEuG +++ mktemp ++ local LAST_ERR=/tmp/tmp.L7fHGQNmm9 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Og8dd6TEuG ++ cat /tmp/tmp.L7fHGQNmm9 ++ rm /tmp/tmp.Og8dd6TEuG /tmp/tmp.L7fHGQNmm9 ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-2283-e32dd8b3-7-cluster11 --namespace=psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.Zgw5OQbSM1 ++ mktemp + local LAST_ERR=/tmp/tmp.IOA2WbCkv3 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-2283-e32dd8b3-7-cluster11 --namespace=psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Zgw5OQbSM1 Context "gke_cloud-dev-112233_us-central1-a_jen-psmdb-2283-e32dd8b3-7-cluster11" modified. 
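Every kubectl invocation in this run goes through the suite's kubectl_bin wrapper, which is what produces the recurring mktemp/LAST_OUT/LAST_ERR chatter and the '[' 0 '!=' 0 -a -n 1 ']' checks throughout this trace. A minimal sketch of that retry pattern, reconstructed only from what the trace shows (the real helper lives in the repo's e2e-tests function library and differs in details):

kubectl_bin() {
	local LAST_OUT LAST_ERR
	local exit_status=0
	local timeout=4
	LAST_OUT=$(mktemp)
	LAST_ERR=$(mktemp)
	for i in $(seq 0 2); do                      # up to three attempts
		set +e
		kubectl "$@" >"$LAST_OUT" 2>"$LAST_ERR"
		exit_status=$?
		set -e
		if [ "$exit_status" -eq 0 ]; then
			break                        # success: stop retrying
		fi
		cat "$LAST_OUT"                      # show each failed attempt, as in the trace
		cat "$LAST_ERR" >&2
		sleep $((i * timeout))               # 0s, 4s, 8s, matching the sleeps seen above
	done
	cat "$LAST_OUT"
	cat "$LAST_ERR" >&2
	rm -f "$LAST_OUT" "$LAST_ERR"
	return "$exit_status"
}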
+ cat /tmp/tmp.IOA2WbCkv3 + rm /tmp/tmp.Zgw5OQbSM1 /tmp/tmp.IOA2WbCkv3 + return 0 + deploy_operator + desc 'start PSMDB operator: docker.io/perconalab/percona-server-mongodb-operator:PR-2283-e32dd8b3' + set +o xtrace ----------------------------------------------------------------------------------- start PSMDB operator: docker.io/perconalab/percona-server-mongodb-operator:PR-2283-e32dd8b3 ----------------------------------------------------------------------------------- + local cr_file + '[' -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2283/e2e-tests/monitoring-2-0/conf/crd.yaml ']' + cr_file=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2283/deploy/crd.yaml + kubectl_bin apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2283/deploy/crd.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.ch1deuJZkA ++ mktemp + local LAST_ERR=/tmp/tmp.qAG9r1iC1v + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2283/deploy/crd.yaml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.ch1deuJZkA customresourcedefinition.apiextensions.k8s.io/perconaservermongodbbackups.psmdb.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaservermongodbrestores.psmdb.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaservermongodbs.psmdb.percona.com serverside-applied + cat /tmp/tmp.qAG9r1iC1v + rm /tmp/tmp.ch1deuJZkA /tmp/tmp.qAG9r1iC1v + return 0 + '[' -n psmdb-operator ']' + apply_rbac cw-rbac + local operator_namespace=psmdb-operator + local rbac=cw-rbac + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2283/deploy/cw-rbac.yaml + kubectl_bin apply -n psmdb-operator -f - + sed -e 's^namespace: .*^namespace: psmdb-operator^' ++ mktemp + local LAST_OUT=/tmp/tmp.tGrIEobjZh ++ mktemp + local LAST_ERR=/tmp/tmp.qWjqKApN3x + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -n psmdb-operator -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.tGrIEobjZh clusterrole.rbac.authorization.k8s.io/percona-server-mongodb-operator created serviceaccount/percona-server-mongodb-operator created clusterrolebinding.rbac.authorization.k8s.io/service-account-percona-server-mongodb-operator created + cat /tmp/tmp.qWjqKApN3x + rm /tmp/tmp.tGrIEobjZh /tmp/tmp.qWjqKApN3x + return 0 + yq eval ' (.spec.template.spec.containers[].image = "docker.io/perconalab/percona-server-mongodb-operator:PR-2283-e32dd8b3") | ((.. | select(.[] == "DISABLE_TELEMETRY")) |= .value="true") | ((.. 
| select(.[] == "LOG_LEVEL")) |= .value="DEBUG")' /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2283/deploy/cw-operator.yaml + kubectl_bin apply -n psmdb-operator -f - ++ mktemp + local LAST_OUT=/tmp/tmp.zgldayXPuf ++ mktemp + local LAST_ERR=/tmp/tmp.eRcJ44JF7J + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -n psmdb-operator -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.zgldayXPuf deployment.apps/percona-server-mongodb-operator created + cat /tmp/tmp.eRcJ44JF7J + rm /tmp/tmp.zgldayXPuf /tmp/tmp.eRcJ44JF7J + return 0 + sleep 20 ++ get_operator_pod ++ kubectl_bin get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.rdMLr4TqZM +++ mktemp ++ local LAST_ERR=/tmp/tmp.Z3Dfb0kcu7 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.rdMLr4TqZM ++ cat /tmp/tmp.Z3Dfb0kcu7 ++ rm /tmp/tmp.rdMLr4TqZM /tmp/tmp.Z3Dfb0kcu7 ++ return 0 + wait_operator_pod percona-server-mongodb-operator-57ff5689b7-8npc2 + local pod=percona-server-mongodb-operator-57ff5689b7-8npc2 + set +o xtrace waiting for pod/percona-server-mongodb-operator-57ff5689b7-8npc2 to be ready.OK + echo 'Print operator info from log' Print operator info from log ++ get_operator_pod ++ kubectl_bin get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator + grep 'Manager starting up' +++ mktemp ++ local LAST_OUT=/tmp/tmp.1w5iWYkcop +++ mktemp ++ local LAST_ERR=/tmp/tmp.LsOijI1yIe ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.1w5iWYkcop ++ cat /tmp/tmp.LsOijI1yIe ++ rm /tmp/tmp.1w5iWYkcop /tmp/tmp.LsOijI1yIe ++ return 0 + kubectl_bin logs -n psmdb-operator percona-server-mongodb-operator-57ff5689b7-8npc2 ++ mktemp + local LAST_OUT=/tmp/tmp.4yBTQzVMVT ++ mktemp + local LAST_ERR=/tmp/tmp.8hs95T042e + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl logs -n psmdb-operator percona-server-mongodb-operator-57ff5689b7-8npc2 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.4yBTQzVMVT + cat /tmp/tmp.8hs95T042e + rm /tmp/tmp.4yBTQzVMVT /tmp/tmp.8hs95T042e + return 0 2026-03-27T12:25:57.470Z INFO setup Manager starting up {"gitCommit": "e32dd8b338bc8af11fb1e90bb80d577a1c65923f", "gitBranch": "PR-2283-e32dd8b3", "buildTime": "", "goVersion": "go1.25.8", "os": "linux", "arch": "amd64"} + create_namespace monitoring-2-0-24245 + local namespace=monitoring-2-0-24245 + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + destroy_chaos_mesh ++ sed s/NAMESPACE// ++ awk '-F ' '{print $2}' ++ tail -n1 ++ helm list --all-namespaces --filter chaos-mesh + local chaos_mesh_ns= + desc 'destroy chaos-mesh' + set +o xtrace ----------------------------------------------------------------------------------- destroy chaos-mesh ----------------------------------------------------------------------------------- + '[' -n '' ']' ++ kubectl get MutatingWebhookConfiguration ++ awk '{print $1}' ++ grep 
chaos-mesh + timeout 30 kubectl delete MutatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ awk '{print $1}' ++ grep chaos-mesh ++ kubectl get ValidatingWebhookConfiguration + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ grep validate-auth ++ kubectl get ValidatingWebhookConfiguration ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl api-resources ++ grep chaos-mesh ++ awk '{print $1}' ++ kubectl get crd ++ awk '{print $1}' ++ grep chaos-mesh.org + timeout 30 kubectl delete crd error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrolebinding ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrolebinding error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrole ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrole error: resource(s) were provided, but no name was specified + : + desc 'cleaned up all old namespaces' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up all old namespaces ----------------------------------------------------------------------------------- + kubectl_bin get ns + grep -E -v '^kube-|^default|Terminating|psmdb-operator|openshift|^gke-|^gmp-|^NAME' + awk '{print$1}' ++ mktemp + '[' -n '' ']' + xargs kubectl delete ns + desc 'cleaned up old namespaces monitoring-2-0-24245' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up old namespaces monitoring-2-0-24245 ----------------------------------------------------------------------------------- + kubectl_bin delete namespace monitoring-2-0-24245 --ignore-not-found + local LAST_OUT=/tmp/tmp.UEYDhQlX7Q ++ mktemp ++ mktemp + local LAST_ERR=/tmp/tmp.cEDLLzFC1m + local exit_status=0 + local timeout=4 ++ seq 0 2 + local LAST_OUT=/tmp/tmp.7XDuJEZjvl + for i in $(seq 0 2) + set +e + kubectl get ns ++ mktemp + local LAST_ERR=/tmp/tmp.tQJnLOf8zl + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete namespace monitoring-2-0-24245 --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.UEYDhQlX7Q + cat /tmp/tmp.cEDLLzFC1m + rm /tmp/tmp.UEYDhQlX7Q /tmp/tmp.cEDLLzFC1m + return 0 error: resource(s) were provided, but no name was specified + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.7XDuJEZjvl + cat /tmp/tmp.tQJnLOf8zl + rm /tmp/tmp.7XDuJEZjvl /tmp/tmp.tQJnLOf8zl + return 0 + kubectl_bin wait --for=delete namespace monitoring-2-0-24245 ++ mktemp + local LAST_OUT=/tmp/tmp.COYYoioo7J ++ mktemp + local LAST_ERR=/tmp/tmp.6PyXn15sTq + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete namespace monitoring-2-0-24245 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.COYYoioo7J + cat /tmp/tmp.6PyXn15sTq + rm /tmp/tmp.COYYoioo7J /tmp/tmp.6PyXn15sTq + return 0 + desc 'create namespace monitoring-2-0-24245' + set +o xtrace ----------------------------------------------------------------------------------- create namespace monitoring-2-0-24245 ----------------------------------------------------------------------------------- + kubectl_bin create namespace monitoring-2-0-24245 ++ mktemp + local 
LAST_OUT=/tmp/tmp.brSfPoADiR ++ mktemp + local LAST_ERR=/tmp/tmp.qwuGrQNCIM + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl create namespace monitoring-2-0-24245 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.brSfPoADiR namespace/monitoring-2-0-24245 created + cat /tmp/tmp.qwuGrQNCIM + rm /tmp/tmp.brSfPoADiR /tmp/tmp.qwuGrQNCIM + return 0 + set_kube_ctx monitoring-2-0-24245 + local namespace=monitoring-2-0-24245 ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.4qL5FdSE4w +++ mktemp ++ local LAST_ERR=/tmp/tmp.5o8aKDPvRm ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.4qL5FdSE4w ++ cat /tmp/tmp.5o8aKDPvRm ++ rm /tmp/tmp.4qL5FdSE4w /tmp/tmp.5o8aKDPvRm ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-2283-e32dd8b3-7-cluster11 --namespace=monitoring-2-0-24245 ++ mktemp + local LAST_OUT=/tmp/tmp.QotHkTalFp ++ mktemp + local LAST_ERR=/tmp/tmp.tHp61QxjbJ + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-2283-e32dd8b3-7-cluster11 --namespace=monitoring-2-0-24245 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.QotHkTalFp Context "gke_cloud-dev-112233_us-central1-a_jen-psmdb-2283-e32dd8b3-7-cluster11" modified. + cat /tmp/tmp.tHp61QxjbJ + rm /tmp/tmp.QotHkTalFp /tmp/tmp.tHp61QxjbJ + return 0 + deploy_cert_manager + desc 'deploy cert manager' + set +o xtrace ----------------------------------------------------------------------------------- deploy cert manager ----------------------------------------------------------------------------------- + kubectl_bin create namespace cert-manager ++ mktemp + local LAST_OUT=/tmp/tmp.bLTUNyNYga ++ mktemp + local LAST_ERR=/tmp/tmp.thGQcxzyfM + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl create namespace cert-manager + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.bLTUNyNYga namespace/cert-manager created + cat /tmp/tmp.thGQcxzyfM + rm /tmp/tmp.bLTUNyNYga /tmp/tmp.thGQcxzyfM + return 0 + kubectl_bin label namespace cert-manager certmanager.k8s.io/disable-validation=true ++ mktemp + local LAST_OUT=/tmp/tmp.dm7CtFIwj0 ++ mktemp + local LAST_ERR=/tmp/tmp.j9j4n2yw9Y + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl label namespace cert-manager certmanager.k8s.io/disable-validation=true + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.dm7CtFIwj0 namespace/cert-manager labeled + cat /tmp/tmp.j9j4n2yw9Y + rm /tmp/tmp.dm7CtFIwj0 /tmp/tmp.j9j4n2yw9Y + return 0 + kubectl_bin apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml --validate=false ++ mktemp + local LAST_OUT=/tmp/tmp.8VjNFWQpMs ++ mktemp + local LAST_ERR=/tmp/tmp.IPshLm7Ali + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml --validate=false + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.8VjNFWQpMs namespace/cert-manager configured customresourcedefinition.apiextensions.k8s.io/challenges.acme.cert-manager.io unchanged 
customresourcedefinition.apiextensions.k8s.io/orders.acme.cert-manager.io unchanged customresourcedefinition.apiextensions.k8s.io/certificaterequests.cert-manager.io unchanged customresourcedefinition.apiextensions.k8s.io/certificates.cert-manager.io unchanged customresourcedefinition.apiextensions.k8s.io/clusterissuers.cert-manager.io unchanged customresourcedefinition.apiextensions.k8s.io/issuers.cert-manager.io unchanged serviceaccount/cert-manager-cainjector created serviceaccount/cert-manager created serviceaccount/cert-manager-webhook created clusterrole.rbac.authorization.k8s.io/cert-manager-cainjector unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-issuers unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-clusterissuers unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-certificates unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-orders unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-challenges unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-ingress-shim unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-cluster-view unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-view unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-edit unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-approve:cert-manager-io unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-certificatesigningrequests unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-webhook:subjectaccessreviews unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-cainjector unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-issuers unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-clusterissuers unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-certificates unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-orders unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-challenges unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-ingress-shim unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-approve:cert-manager-io unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-certificatesigningrequests unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-webhook:subjectaccessreviews unchanged role.rbac.authorization.k8s.io/cert-manager-cainjector:leaderelection unchanged role.rbac.authorization.k8s.io/cert-manager:leaderelection unchanged role.rbac.authorization.k8s.io/cert-manager-tokenrequest created role.rbac.authorization.k8s.io/cert-manager-webhook:dynamic-serving created rolebinding.rbac.authorization.k8s.io/cert-manager-cainjector:leaderelection unchanged rolebinding.rbac.authorization.k8s.io/cert-manager:leaderelection unchanged rolebinding.rbac.authorization.k8s.io/cert-manager-tokenrequest created rolebinding.rbac.authorization.k8s.io/cert-manager-webhook:dynamic-serving created service/cert-manager-cainjector created service/cert-manager created service/cert-manager-webhook created deployment.apps/cert-manager-cainjector created deployment.apps/cert-manager created deployment.apps/cert-manager-webhook created mutatingwebhookconfiguration.admissionregistration.k8s.io/cert-manager-webhook configured 
validatingwebhookconfiguration.admissionregistration.k8s.io/cert-manager-webhook configured + cat /tmp/tmp.IPshLm7Ali Warning: resource namespaces/cert-manager is missing the kubectl.kubernetes.io/last-applied-configuration annotation which is required by kubectl apply. kubectl apply should only be used on resources created declaratively by either kubectl create --save-config or kubectl apply. The missing annotation will be patched automatically. + rm /tmp/tmp.8VjNFWQpMs /tmp/tmp.IPshLm7Ali + return 0 + kubectl_bin -n cert-manager wait pod -l app.kubernetes.io/instance=cert-manager --for=condition=ready ++ mktemp + local LAST_OUT=/tmp/tmp.5QihulNhQS ++ mktemp + local LAST_ERR=/tmp/tmp.JAkjo87gwQ + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl -n cert-manager wait pod -l app.kubernetes.io/instance=cert-manager --for=condition=ready + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.5QihulNhQS pod/cert-manager-559d798845-6k9rz condition met pod/cert-manager-cainjector-64958d9c7c-zj6tb condition met pod/cert-manager-webhook-7fb6f99b56-p25cc condition met + cat /tmp/tmp.JAkjo87gwQ + rm /tmp/tmp.5QihulNhQS /tmp/tmp.JAkjo87gwQ + return 0 + sleep 120 + desc 'install PMM Server' + set +o xtrace ----------------------------------------------------------------------------------- install PMM Server ----------------------------------------------------------------------------------- + deploy_pmm_server + helm uninstall monitoring Error: uninstall: Release not loaded: monitoring: release: not found + : + helm repo remove stable Error: no repo named "stable" found + : + helm repo add stable https://charts.helm.sh/stable "stable" has been added to your repositories + [[ -n '' ]] + retry 10 60 helm install monitoring --set imageTag=dev-latest --set imageRepo=docker.io/perconalab/pmm-server https://percona-charts.storage.googleapis.com/pmm-server-9.9.9.tgz + local max=10 + local delay=60 + shift 2 + local n=1 + helm install monitoring --set imageTag=dev-latest --set imageRepo=docker.io/perconalab/pmm-server https://percona-charts.storage.googleapis.com/pmm-server-9.9.9.tgz NAME: monitoring LAST DEPLOYED: Fri Mar 27 12:29:01 2026 NAMESPACE: monitoring-2-0-24245 STATUS: deployed REVISION: 1 TEST SUITE: None NOTES: PMM server can be accessed via HTTPS (port 443) on the following DNS name from within your cluster: endpoint: https://monitoring-service.monitoring-2-0-24245.svc.cluster.local:443 login: admin password: admin + sleep 40 + kubectl_bin exec monitoring-0 -- bash -c 'ls -l /proc/*/exe 2>/dev/null| grep postgres >/dev/null' ++ mktemp + local LAST_OUT=/tmp/tmp.QHBvK9mimA ++ mktemp + local LAST_ERR=/tmp/tmp.U5cn75XkXz + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec monitoring-0 -- bash -c 'ls -l /proc/*/exe 2>/dev/null| grep postgres >/dev/null' + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.QHBvK9mimA + cat /tmp/tmp.U5cn75XkXz error: Internal error occurred: unable to upgrade connection: container not found ("monitoring") + sleep 0 + for i in $(seq 0 2) + set +e + kubectl exec monitoring-0 -- bash -c 'ls -l /proc/*/exe 2>/dev/null| grep postgres >/dev/null' + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.QHBvK9mimA + cat /tmp/tmp.U5cn75XkXz error: Internal error occurred: unable to upgrade connection: container not found ("monitoring") + sleep 4 + for i in $(seq 0 2) + set +e + kubectl exec monitoring-0 -- bash -c 'ls -l /proc/*/exe 2>/dev/null| grep 
postgres >/dev/null' + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.QHBvK9mimA + cat /tmp/tmp.U5cn75XkXz error: Internal error occurred: unable to upgrade connection: container not found ("monitoring") + sleep 8 + cat /tmp/tmp.QHBvK9mimA + cat /tmp/tmp.U5cn75XkXz error: Internal error occurred: unable to upgrade connection: container not found ("monitoring") + rm /tmp/tmp.QHBvK9mimA /tmp/tmp.U5cn75XkXz + return 1 + echo 'Retry 0' Retry 0 + sleep 5 + let retry+=1 + '[' 1 -ge 30 ']' + kubectl_bin exec monitoring-0 -- bash -c 'ls -l /proc/*/exe 2>/dev/null| grep postgres >/dev/null' ++ mktemp + local LAST_OUT=/tmp/tmp.DS5vBkgkUx ++ mktemp + local LAST_ERR=/tmp/tmp.kK0YxYyu3J + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec monitoring-0 -- bash -c 'ls -l /proc/*/exe 2>/dev/null| grep postgres >/dev/null' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.DS5vBkgkUx + cat /tmp/tmp.kK0YxYyu3J + rm /tmp/tmp.DS5vBkgkUx /tmp/tmp.kK0YxYyu3J + return 0 + cluster=monitoring + desc 'create secrets and start client' + set +o xtrace ----------------------------------------------------------------------------------- create secrets and start client ----------------------------------------------------------------------------------- + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2283/e2e-tests/conf/secrets.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2283/e2e-tests/monitoring-2-0/conf/secrets.yml ++ mktemp + local LAST_OUT=/tmp/tmp.hvceot9y6s ++ mktemp + local LAST_ERR=/tmp/tmp.SO4j5EvAaH + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2283/e2e-tests/conf/secrets.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2283/e2e-tests/monitoring-2-0/conf/secrets.yml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.hvceot9y6s secret/some-users created secret/some-users unchanged + cat /tmp/tmp.SO4j5EvAaH + rm /tmp/tmp.hvceot9y6s /tmp/tmp.SO4j5EvAaH + return 0 + yq '.spec.template.spec.volumes[0].secret.secretName="monitoring-ssl"' /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2283/e2e-tests/conf/client_with_tls.yml + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.s5pMJrYoFy ++ mktemp + local LAST_ERR=/tmp/tmp.m1xejmE1YN + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.s5pMJrYoFy deployment.apps/psmdb-client created + cat /tmp/tmp.m1xejmE1YN + rm /tmp/tmp.s5pMJrYoFy /tmp/tmp.m1xejmE1YN + return 0 + sleep 90 + desc 'create first PSMDB cluster monitoring' + set +o xtrace ----------------------------------------------------------------------------------- create first PSMDB cluster monitoring ----------------------------------------------------------------------------------- + apply_cluster /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2283/e2e-tests/monitoring-2-0/conf/monitoring-rs0.yml + '[' -z '' ']' + cat_config /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2283/e2e-tests/monitoring-2-0/conf/monitoring-rs0.yml + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2283/e2e-tests/monitoring-2-0/conf/monitoring-rs0.yml + kubectl_bin apply -f - + yq eval '(.spec | select(.image == null)).image = "docker.io/perconalab/percona-server-mongodb-operator:main-mongod8.0"' ++ mktemp + yq eval '(.spec | select(has("pmm"))).pmm.image = 
"docker.io/percona/pmm-client:2.44.1-1"' + yq eval '(.spec | select(has("initImage"))).initImage = "docker.io/perconalab/percona-server-mongodb-operator:PR-2283-e32dd8b3"' + /usr/sbin/sed -e s/NAME_SPACE/monitoring-2-0-24245/g + yq eval '(.spec | select(has("backup"))).backup.image = "docker.io/perconalab/percona-server-mongodb-operator:main-backup"' + yq eval '.spec.upgradeOptions.apply="Never"' + local LAST_OUT=/tmp/tmp.jGGfD1ceIr ++ mktemp + local LAST_ERR=/tmp/tmp.zgnpUkCjva + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.jGGfD1ceIr perconaservermongodb.psmdb.percona.com/monitoring created + cat /tmp/tmp.zgnpUkCjva + rm /tmp/tmp.jGGfD1ceIr /tmp/tmp.zgnpUkCjva + return 0 + wait_for_running monitoring-rs0 3 + local name=monitoring-rs0 + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=monitoring ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod monitoring-rs0-0 + local pod=monitoring-rs0-0 + set +o xtrace waiting for pod/monitoring-rs0-0 to be ready..................OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod monitoring-rs0-1 + local pod=monitoring-rs0-1 + set +o xtrace waiting for pod/monitoring-rs0-1 to be ready.............OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb monitoring -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.WApdfCFjhZ +++ mktemp ++ local LAST_ERR=/tmp/tmp.Y7s47mVyQc ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb monitoring -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.WApdfCFjhZ ++ cat /tmp/tmp.Y7s47mVyQc ++ rm /tmp/tmp.WApdfCFjhZ /tmp/tmp.Y7s47mVyQc ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod monitoring-rs0-2 + local pod=monitoring-rs0-2 + set +o xtrace waiting for pod/monitoring-rs0-2 to be ready................OK ++ kubectl_bin get psmdb monitoring -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.0l4BelDlTC +++ mktemp ++ local LAST_ERR=/tmp/tmp.6uPDo0UVxy ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb monitoring -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.0l4BelDlTC ++ cat /tmp/tmp.6uPDo0UVxy ++ rm /tmp/tmp.0l4BelDlTC /tmp/tmp.6uPDo0UVxy ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb monitoring -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.lxTFxWCVwt +++ mktemp ++ local LAST_ERR=/tmp/tmp.Lvcvlqrn5t ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb monitoring -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.lxTFxWCVwt ++ cat /tmp/tmp.Lvcvlqrn5t ++ rm /tmp/tmp.lxTFxWCVwt /tmp/tmp.Lvcvlqrn5t ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness.................... 
+ desc 'check if pmm-client container is not enabled' + set +o xtrace ----------------------------------------------------------------------------------- check if pmm-client container is not enabled ----------------------------------------------------------------------------------- + compare_kubectl statefulset/monitoring-rs0 -no-pmm + local resource=statefulset/monitoring-rs0 + local postfix=-no-pmm + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2283/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-rs0-no-pmm.yml + local new_result=/tmp/tmp.DK7M6zKIcZ/statefulset_monitoring-rs0.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2283/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-rs0-no-pmm-oc.yml ']' + kubectl_bin get -o yaml statefulset/monitoring-rs0 + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("monitoring-2-0-24245", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. 
== "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.oh63HomE6E ++ mktemp + local LAST_ERR=/tmp/tmp.gAbriQ3fIl + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get -o yaml statefulset/monitoring-rs0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.oh63HomE6E + cat /tmp/tmp.gAbriQ3fIl + rm /tmp/tmp.oh63HomE6E /tmp/tmp.gAbriQ3fIl + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.DK7M6zKIcZ/statefulset_monitoring-rs0.yml + version_gt 1.22 ++ echo '1.32 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.DK7M6zKIcZ/statefulset_monitoring-rs0.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.DK7M6zKIcZ/statefulset_monitoring-rs0.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2283/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-rs0-no-pmm.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2283/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-rs0-no-pmm.yml /tmp/tmp.DK7M6zKIcZ/statefulset_monitoring-rs0.yml + log 'compare_kubectl: statefulset/monitoring-rs0 OK' + set +o xtrace [2026-03-27T12:34:19+0000] compare_kubectl: statefulset/monitoring-rs0 OK + sleep 10 + custom_port=27019 + run_mongos 'db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})' userAdmin:userAdmin123456@monitoring-mongos.monitoring-2-0-24245 '' '' '--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' 27019 + local 'command=db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})' + local uri=userAdmin:userAdmin123456@monitoring-mongos.monitoring-2-0-24245 + local driver=mongodb + local suffix=.svc.cluster.local + local 'mongo_flag=--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' + local port=27019 + local mongo_bin=mongo ++ echo .svc.cluster.local ++ awk -F: '{print $2}' + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27019 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.V6G1znylGw +++ mktemp ++ local LAST_ERR=/tmp/tmp.wdO2ylg99W ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.V6G1znylGw ++ cat /tmp/tmp.wdO2ylg99W ++ rm /tmp/tmp.V6G1znylGw /tmp/tmp.wdO2ylg99W ++ return 0 + local client_container=psmdb-client-699f458f75-s752z + kubectl_bin exec psmdb-client-699f458f75-s752z -- bash -c 'printf '\''db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})\n'\'' | mongo mongodb://userAdmin:userAdmin123456@monitoring-mongos.monitoring-2-0-24245.svc.cluster.local:27019/admin --tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' ++ mktemp + local LAST_OUT=/tmp/tmp.sRwtG6sVsl ++ mktemp + local LAST_ERR=/tmp/tmp.BEx5M5oZsR + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-699f458f75-s752z -- bash -c 'printf '\''db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})\n'\'' | mongo mongodb://userAdmin:userAdmin123456@monitoring-mongos.monitoring-2-0-24245.svc.cluster.local:27019/admin --tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile 
/etc/mongodb-ssl/ca.crt --tls' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.sRwtG6sVsl Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://monitoring-mongos.monitoring-2-0-24245.svc.cluster.local:27019/admin?compressors=disabled&gssapiServiceName=mongodb {"t":{"$date":"2026-03-27T12:34:32.460Z"},"s":"I", "c":"NETWORK", "id":5490002, "ctx":"thread1","msg":"Started a new thread for the timer service"} Implicit session: session { "id" : UUID("45d810a4-36a8-44d0-9dc7-8a232866ba62") } Percona Server for MongoDB server version: v8.0.19-7 WARNING: shell and server versions do not match Successfully added user: { "user" : "myApp", "roles" : [ { "db" : "myApp", "role" : "readWrite" } ] } bye + cat /tmp/tmp.BEx5M5oZsR + rm /tmp/tmp.sRwtG6sVsl /tmp/tmp.BEx5M5oZsR + return 0 + run_mongos 'sh.enableSharding("myApp")' clusterAdmin:clusterAdmin123456@monitoring-mongos.monitoring-2-0-24245 '' '' '--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' 27019 + local 'command=sh.enableSharding("myApp")' + local uri=clusterAdmin:clusterAdmin123456@monitoring-mongos.monitoring-2-0-24245 + local driver=mongodb + local suffix=.svc.cluster.local + local 'mongo_flag=--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' + local port=27019 + local mongo_bin=mongo ++ echo .svc.cluster.local ++ awk -F: '{print $2}' + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27019 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.GUvkCJJTut +++ mktemp ++ local LAST_ERR=/tmp/tmp.OPioKEOe0P ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.GUvkCJJTut ++ cat /tmp/tmp.OPioKEOe0P ++ rm /tmp/tmp.GUvkCJJTut /tmp/tmp.OPioKEOe0P ++ return 0 + local client_container=psmdb-client-699f458f75-s752z + kubectl_bin exec psmdb-client-699f458f75-s752z -- bash -c 'printf '\''sh.enableSharding("myApp")\n'\'' | mongo mongodb://clusterAdmin:clusterAdmin123456@monitoring-mongos.monitoring-2-0-24245.svc.cluster.local:27019/admin --tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' ++ mktemp + local LAST_OUT=/tmp/tmp.IXfughDzRy ++ mktemp + local LAST_ERR=/tmp/tmp.vRuwr5ie60 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-699f458f75-s752z -- bash -c 'printf '\''sh.enableSharding("myApp")\n'\'' | mongo mongodb://clusterAdmin:clusterAdmin123456@monitoring-mongos.monitoring-2-0-24245.svc.cluster.local:27019/admin --tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.IXfughDzRy Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://monitoring-mongos.monitoring-2-0-24245.svc.cluster.local:27019/admin?compressors=disabled&gssapiServiceName=mongodb {"t":{"$date":"2026-03-27T12:34:34.562Z"},"s":"I", "c":"NETWORK", "id":5490002, "ctx":"thread1","msg":"Started a new thread for the timer service"} Implicit session: session { "id" : UUID("a415b6fc-4956-4a48-8303-86e6df8b682d") } Percona Server for MongoDB server version: v8.0.19-7 WARNING: shell and server versions do not match { "ok" : 1, "$clusterTime" : { "clusterTime" : Timestamp(1774614874, 8), "signature" : { 
"hash" : BinData(0,"YSsbV0ivBNTQs49XNSXRzFafPLQ="), "keyId" : NumberLong("7621912451688169496") } }, "operationTime" : Timestamp(1774614874, 5) } bye + cat /tmp/tmp.vRuwr5ie60 + rm /tmp/tmp.IXfughDzRy /tmp/tmp.vRuwr5ie60 + return 0 + insert_data_mongos 100500 myApp '--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' 27019 + local data=100500 + local db_name=myApp + local 'flags=--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' + local port=27019 + run_mongos 'use myApp\n db.test.insert({ x: 100500 })' myApp:myPass@monitoring-mongos.monitoring-2-0-24245 '' '' '--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' 27019 + local 'command=use myApp\n db.test.insert({ x: 100500 })' + local uri=myApp:myPass@monitoring-mongos.monitoring-2-0-24245 + local driver=mongodb + local suffix=.svc.cluster.local + local 'mongo_flag=--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' + local port=27019 + local mongo_bin=mongo ++ echo .svc.cluster.local ++ awk -F: '{print $2}' + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27019 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.pPhotycgbl +++ mktemp ++ local LAST_ERR=/tmp/tmp.0GFVGhucY8 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.pPhotycgbl ++ cat /tmp/tmp.0GFVGhucY8 ++ rm /tmp/tmp.pPhotycgbl /tmp/tmp.0GFVGhucY8 ++ return 0 + local client_container=psmdb-client-699f458f75-s752z + kubectl_bin exec psmdb-client-699f458f75-s752z -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb://myApp:myPass@monitoring-mongos.monitoring-2-0-24245.svc.cluster.local:27019/admin --tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' ++ mktemp + local LAST_OUT=/tmp/tmp.0QDxKUM75U ++ mktemp + local LAST_ERR=/tmp/tmp.iUl5WVLwLs + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-699f458f75-s752z -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb://myApp:myPass@monitoring-mongos.monitoring-2-0-24245.svc.cluster.local:27019/admin --tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.0QDxKUM75U Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://monitoring-mongos.monitoring-2-0-24245.svc.cluster.local:27019/admin?compressors=disabled&gssapiServiceName=mongodb {"t":{"$date":"2026-03-27T12:34:37.260Z"},"s":"I", "c":"NETWORK", "id":5490002, "ctx":"thread1","msg":"Started a new thread for the timer service"} Implicit session: session { "id" : UUID("f0d70022-535a-4d3d-bba2-a98425532df3") } Percona Server for MongoDB server version: v8.0.19-7 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.iUl5WVLwLs + rm /tmp/tmp.0QDxKUM75U /tmp/tmp.iUl5WVLwLs + return 0 + insert_data_mongos 100600 myApp '--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' 27019 + local data=100600 + local db_name=myApp + local 'flags=--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' + local port=27019 + 
run_mongos 'use myApp\n db.test.insert({ x: 100600 })' myApp:myPass@monitoring-mongos.monitoring-2-0-24245 '' '' '--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' 27019 + local 'command=use myApp\n db.test.insert({ x: 100600 })' + local uri=myApp:myPass@monitoring-mongos.monitoring-2-0-24245 + local driver=mongodb + local suffix=.svc.cluster.local + local 'mongo_flag=--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' + local port=27019 + local mongo_bin=mongo ++ echo .svc.cluster.local ++ awk -F: '{print $2}' + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27019 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Ih5ysYOFmt +++ mktemp ++ local LAST_ERR=/tmp/tmp.0yKear5Bfe ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Ih5ysYOFmt ++ cat /tmp/tmp.0yKear5Bfe ++ rm /tmp/tmp.Ih5ysYOFmt /tmp/tmp.0yKear5Bfe ++ return 0 + local client_container=psmdb-client-699f458f75-s752z + kubectl_bin exec psmdb-client-699f458f75-s752z -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100600 })\n'\'' | mongo mongodb://myApp:myPass@monitoring-mongos.monitoring-2-0-24245.svc.cluster.local:27019/admin --tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' ++ mktemp + local LAST_OUT=/tmp/tmp.d9wLFMixqN ++ mktemp + local LAST_ERR=/tmp/tmp.PJbhuVIRXi + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-699f458f75-s752z -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100600 })\n'\'' | mongo mongodb://myApp:myPass@monitoring-mongos.monitoring-2-0-24245.svc.cluster.local:27019/admin --tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.d9wLFMixqN Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://monitoring-mongos.monitoring-2-0-24245.svc.cluster.local:27019/admin?compressors=disabled&gssapiServiceName=mongodb {"t":{"$date":"2026-03-27T12:34:39.926Z"},"s":"I", "c":"NETWORK", "id":5490002, "ctx":"thread1","msg":"Started a new thread for the timer service"} Implicit session: session { "id" : UUID("86d4d192-0319-47c5-9802-6a2a9c820b2a") } Percona Server for MongoDB server version: v8.0.19-7 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.PJbhuVIRXi + rm /tmp/tmp.d9wLFMixqN /tmp/tmp.PJbhuVIRXi + return 0 + insert_data_mongos 100700 myApp '--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' 27019 + local data=100700 + local db_name=myApp + local 'flags=--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' + local port=27019 + run_mongos 'use myApp\n db.test.insert({ x: 100700 })' myApp:myPass@monitoring-mongos.monitoring-2-0-24245 '' '' '--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' 27019 + local 'command=use myApp\n db.test.insert({ x: 100700 })' + local uri=myApp:myPass@monitoring-mongos.monitoring-2-0-24245 + local driver=mongodb + local suffix=.svc.cluster.local + local 'mongo_flag=--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' + local port=27019 + local 
mongo_bin=mongo ++ echo .svc.cluster.local ++ awk -F: '{print $2}' + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27019 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.FCfxYURCUW +++ mktemp ++ local LAST_ERR=/tmp/tmp.ns4jLBGSyi ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.FCfxYURCUW ++ cat /tmp/tmp.ns4jLBGSyi ++ rm /tmp/tmp.FCfxYURCUW /tmp/tmp.ns4jLBGSyi ++ return 0 + local client_container=psmdb-client-699f458f75-s752z + kubectl_bin exec psmdb-client-699f458f75-s752z -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100700 })\n'\'' | mongo mongodb://myApp:myPass@monitoring-mongos.monitoring-2-0-24245.svc.cluster.local:27019/admin --tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' ++ mktemp + local LAST_OUT=/tmp/tmp.ZK7QViu7LT ++ mktemp + local LAST_ERR=/tmp/tmp.84LUMFUCZa + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-699f458f75-s752z -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100700 })\n'\'' | mongo mongodb://myApp:myPass@monitoring-mongos.monitoring-2-0-24245.svc.cluster.local:27019/admin --tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.ZK7QViu7LT Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://monitoring-mongos.monitoring-2-0-24245.svc.cluster.local:27019/admin?compressors=disabled&gssapiServiceName=mongodb {"t":{"$date":"2026-03-27T12:34:41.988Z"},"s":"I", "c":"NETWORK", "id":5490002, "ctx":"thread1","msg":"Started a new thread for the timer service"} Implicit session: session { "id" : UUID("2e5e642d-f413-4816-8b2d-d9ad03446620") } Percona Server for MongoDB server version: v8.0.19-7 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.84LUMFUCZa + rm /tmp/tmp.ZK7QViu7LT /tmp/tmp.84LUMFUCZa + return 0 + desc 'add PMM_SERVER_API_KEY for secret some-users' + set +o xtrace ----------------------------------------------------------------------------------- add PMM_SERVER_API_KEY for secret some-users ----------------------------------------------------------------------------------- ++ jq .key +++ get_service_endpoint monitoring-service +++ local service=monitoring-service ++++ kubectl_bin get service/monitoring-service -o json ++++ jq '.status.loadBalancer.ingress[].hostname' ++++ sed -e 's/^"//; s/"$//;' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.BjSTvVzg41 +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.NqVUVMZaw2 ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o json ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.BjSTvVzg41 ++++ cat /tmp/tmp.NqVUVMZaw2 ++++ rm /tmp/tmp.BjSTvVzg41 /tmp/tmp.NqVUVMZaw2 ++++ return 0 +++ local hostname=null +++ '[' -n null -a null '!=' null ']' ++++ kubectl_bin get service/monitoring-service -o json ++++ jq '.status.loadBalancer.ingress[].ip' ++++ sed -e 's/^"//; s/"$//;' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.uRSMiJ7Ei8 +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.69kmvzLDYV ++++ local exit_status=0 ++++ local timeout=4 
+++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o json ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.uRSMiJ7Ei8 ++++ cat /tmp/tmp.69kmvzLDYV ++++ rm /tmp/tmp.uRSMiJ7Ei8 /tmp/tmp.69kmvzLDYV ++++ return 0 +++ local ip=35.224.235.34 +++ '[' -n 35.224.235.34 -a 35.224.235.34 '!=' null ']' +++ echo 35.224.235.34 +++ return ++ curl --insecure -X POST -H 'Content-Type: application/json' -d '{"name":"operator", "role": "Admin"}' https://admin:admin@35.224.235.34/graph/api/auth/keys (curl progress meter elided; 119 bytes received, 36 sent) + API_KEY='"eyJrIjoiUXRyc1BPVVdCaUVIS3Q5Nk9YVEk3cnhPeWJzOVh0WTQiLCJuIjoib3BlcmF0b3IiLCJpZCI6MX0="' + kubectl_bin patch secret some-users --type merge --patch '{"stringData": {"PMM_SERVER_API_KEY": "eyJrIjoiUXRyc1BPVVdCaUVIS3Q5Nk9YVEk3cnhPeWJzOVh0WTQiLCJuIjoib3BlcmF0b3IiLCJpZCI6MX0="}}' ++ mktemp + local LAST_OUT=/tmp/tmp.X6Ag7qzjCB ++ mktemp + local LAST_ERR=/tmp/tmp.ofbI0wz9w0 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl patch secret some-users --type merge --patch '{"stringData": {"PMM_SERVER_API_KEY": "eyJrIjoiUXRyc1BPVVdCaUVIS3Q5Nk9YVEk3cnhPeWJzOVh0WTQiLCJuIjoib3BlcmF0b3IiLCJpZCI6MX0="}}' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.X6Ag7qzjCB secret/some-users patched + cat /tmp/tmp.ofbI0wz9w0 + rm /tmp/tmp.X6Ag7qzjCB /tmp/tmp.ofbI0wz9w0 + return 0 + desc 'check if all 3 Pods started' + set +o xtrace ----------------------------------------------------------------------------------- check if all 3 Pods started ----------------------------------------------------------------------------------- + wait_for_running monitoring-rs0 3 + local name=monitoring-rs0 + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=monitoring ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod monitoring-rs0-0 + local pod=monitoring-rs0-0 + set +o xtrace waiting for pod/monitoring-rs0-0 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod monitoring-rs0-1 + local pod=monitoring-rs0-1 + set +o xtrace waiting for pod/monitoring-rs0-1 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb monitoring -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.5FiV4AeXiO +++ mktemp ++ local LAST_ERR=/tmp/tmp.Pkoo7YPzk6 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb monitoring -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.5FiV4AeXiO ++ cat /tmp/tmp.Pkoo7YPzk6 ++ rm /tmp/tmp.5FiV4AeXiO /tmp/tmp.Pkoo7YPzk6 ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod monitoring-rs0-2 + local pod=monitoring-rs0-2 + set +o xtrace waiting for pod/monitoring-rs0-2 to be ready.OK ++ kubectl_bin get psmdb monitoring -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.5DW1TQYU6R +++ mktemp ++ local LAST_ERR=/tmp/tmp.iHEf8gzQbc ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb monitoring -o 
'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.5DW1TQYU6R ++ cat /tmp/tmp.iHEf8gzQbc ++ rm /tmp/tmp.5DW1TQYU6R /tmp/tmp.iHEf8gzQbc ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb monitoring -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.NCsyDxSnVd +++ mktemp ++ local LAST_ERR=/tmp/tmp.BUGHOFjvZX ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb monitoring -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.NCsyDxSnVd ++ cat /tmp/tmp.BUGHOFjvZX ++ rm /tmp/tmp.NCsyDxSnVd /tmp/tmp.BUGHOFjvZX ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness....................................................................................................................................................... + sleep 90 + desc 'check if pmm-client container enabled' + set +o xtrace ----------------------------------------------------------------------------------- check if pmm-client container enabled ----------------------------------------------------------------------------------- + compare_kubectl statefulset/monitoring-rs0 + local resource=statefulset/monitoring-rs0 + local postfix= + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2283/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-rs0.yml + local new_result=/tmp/tmp.DK7M6zKIcZ/statefulset_monitoring-rs0.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2283/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-rs0-oc.yml ']' + kubectl_bin get -o yaml statefulset/monitoring-rs0 + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | ++ mktemp del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. 
| select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("monitoring-2-0-24245", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. == "batch/v1beta1")) = "batch/v1" ' - + local LAST_OUT=/tmp/tmp.Ij2g0A8OHM ++ mktemp + local LAST_ERR=/tmp/tmp.ZiWstpzQOv + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get -o yaml statefulset/monitoring-rs0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Ij2g0A8OHM + cat /tmp/tmp.ZiWstpzQOv + rm /tmp/tmp.Ij2g0A8OHM /tmp/tmp.ZiWstpzQOv + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.DK7M6zKIcZ/statefulset_monitoring-rs0.yml + version_gt 1.22 ++ echo '1.32 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.DK7M6zKIcZ/statefulset_monitoring-rs0.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.DK7M6zKIcZ/statefulset_monitoring-rs0.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2283/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-rs0.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2283/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-rs0.yml /tmp/tmp.DK7M6zKIcZ/statefulset_monitoring-rs0.yml + log 'compare_kubectl: statefulset/monitoring-rs0 OK' + set +o xtrace [2026-03-27T12:41:38+0000] compare_kubectl: statefulset/monitoring-rs0 OK + compare_kubectl service/monitoring-rs0 + local resource=service/monitoring-rs0 + local postfix= + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2283/e2e-tests/monitoring-2-0/compare/service_monitoring-rs0.yml + local new_result=/tmp/tmp.DK7M6zKIcZ/service_monitoring-rs0.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2283/e2e-tests/monitoring-2-0/compare/service_monitoring-rs0-oc.yml ']' + kubectl_bin get -o yaml service/monitoring-rs0 + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. 
| select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("monitoring-2-0-24245", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. == "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.0v733gTLU2 ++ mktemp + local LAST_ERR=/tmp/tmp.pcVrrPGH7D + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get -o yaml service/monitoring-rs0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.0v733gTLU2 + cat /tmp/tmp.pcVrrPGH7D + rm /tmp/tmp.0v733gTLU2 /tmp/tmp.pcVrrPGH7D + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.DK7M6zKIcZ/service_monitoring-rs0.yml + version_gt 1.22 ++ echo '1.32 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.DK7M6zKIcZ/service_monitoring-rs0.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.DK7M6zKIcZ/service_monitoring-rs0.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2283/e2e-tests/monitoring-2-0/compare/service_monitoring-rs0.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2283/e2e-tests/monitoring-2-0/compare/service_monitoring-rs0.yml /tmp/tmp.DK7M6zKIcZ/service_monitoring-rs0.yml + log 'compare_kubectl: service/monitoring-rs0 OK' + set +o xtrace [2026-03-27T12:41:39+0000] compare_kubectl: service/monitoring-rs0 OK + compare_kubectl service/monitoring-mongos + local resource=service/monitoring-mongos + local postfix= + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2283/e2e-tests/monitoring-2-0/compare/service_monitoring-mongos.yml + local new_result=/tmp/tmp.DK7M6zKIcZ/service_monitoring-mongos.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2283/e2e-tests/monitoring-2-0/compare/service_monitoring-mongos-oc.yml ']' + kubectl_bin get -o yaml service/monitoring-mongos + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. 
| select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("monitoring-2-0-24245", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. == "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.FO53Kr8xNf ++ mktemp + local LAST_ERR=/tmp/tmp.QUtLaYhgZi + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get -o yaml service/monitoring-mongos + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.FO53Kr8xNf + cat /tmp/tmp.QUtLaYhgZi + rm /tmp/tmp.FO53Kr8xNf /tmp/tmp.QUtLaYhgZi + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.DK7M6zKIcZ/service_monitoring-mongos.yml + version_gt 1.22 ++ echo '1.32 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.DK7M6zKIcZ/service_monitoring-mongos.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.DK7M6zKIcZ/service_monitoring-mongos.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2283/e2e-tests/monitoring-2-0/compare/service_monitoring-mongos.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2283/e2e-tests/monitoring-2-0/compare/service_monitoring-mongos.yml /tmp/tmp.DK7M6zKIcZ/service_monitoring-mongos.yml + log 'compare_kubectl: service/monitoring-mongos OK' + set +o xtrace [2026-03-27T12:41:40+0000] compare_kubectl: service/monitoring-mongos OK + compare_kubectl statefulset/monitoring-cfg + local resource=statefulset/monitoring-cfg + local postfix= + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2283/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-cfg.yml + local new_result=/tmp/tmp.DK7M6zKIcZ/statefulset_monitoring-cfg.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2283/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-cfg-oc.yml ']' + kubectl_bin get -o yaml statefulset/monitoring-cfg ++ mktemp + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. 
| select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("monitoring-2-0-24245", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. 
== "batch/v1beta1")) = "batch/v1" ' - + local LAST_OUT=/tmp/tmp.WbwaHQK0mW ++ mktemp + local LAST_ERR=/tmp/tmp.lWkcrTprOX + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get -o yaml statefulset/monitoring-cfg + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.WbwaHQK0mW + cat /tmp/tmp.lWkcrTprOX + rm /tmp/tmp.WbwaHQK0mW /tmp/tmp.lWkcrTprOX + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.DK7M6zKIcZ/statefulset_monitoring-cfg.yml + version_gt 1.22 ++ echo '1.32 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.DK7M6zKIcZ/statefulset_monitoring-cfg.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.DK7M6zKIcZ/statefulset_monitoring-cfg.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2283/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-cfg.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2283/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-cfg.yml /tmp/tmp.DK7M6zKIcZ/statefulset_monitoring-cfg.yml + log 'compare_kubectl: statefulset/monitoring-cfg OK' + set +o xtrace [2026-03-27T12:41:41+0000] compare_kubectl: statefulset/monitoring-cfg OK + compare_kubectl statefulset/monitoring-mongos + local resource=statefulset/monitoring-mongos + local postfix= + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2283/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-mongos.yml + local new_result=/tmp/tmp.DK7M6zKIcZ/statefulset_monitoring-mongos.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2283/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-mongos-oc.yml ']' + kubectl_bin get -o yaml statefulset/monitoring-mongos + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. 
| select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("monitoring-2-0-24245", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. == "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.jGcAwP5xV9 ++ mktemp + local LAST_ERR=/tmp/tmp.xE0N3cLcjY + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get -o yaml statefulset/monitoring-mongos + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.jGcAwP5xV9 + cat /tmp/tmp.xE0N3cLcjY + rm /tmp/tmp.jGcAwP5xV9 /tmp/tmp.xE0N3cLcjY + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.DK7M6zKIcZ/statefulset_monitoring-mongos.yml + version_gt 1.22 ++ echo '1.32 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.DK7M6zKIcZ/statefulset_monitoring-mongos.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.DK7M6zKIcZ/statefulset_monitoring-mongos.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2283/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-mongos.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2283/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-mongos.yml /tmp/tmp.DK7M6zKIcZ/statefulset_monitoring-mongos.yml + log 'compare_kubectl: statefulset/monitoring-mongos OK' + set +o xtrace [2026-03-27T12:41:43+0000] compare_kubectl: statefulset/monitoring-mongos OK + desc 'check mongod metrics' + set +o xtrace ----------------------------------------------------------------------------------- check mongod metrics ----------------------------------------------------------------------------------- + get_metric_values node_boot_time_seconds monitoring-2-0-24245-monitoring-rs0-1 admin:admin + local metric=node_boot_time_seconds + local instance=monitoring-2-0-24245-monitoring-rs0-1 + local user_pass=admin:admin ++ /usr/sbin/date -u +%s -d '-1 minute' + local start=1774615243 ++ /usr/sbin/date -u +%s + local end=1774615303 ++ get_service_endpoint monitoring-service ++ local service=monitoring-service +++ kubectl_bin get service/monitoring-service -o json ++++ mktemp +++ jq '.status.loadBalancer.ingress[].hostname' +++ sed -e 's/^"//; s/"$//;' +++ local LAST_OUT=/tmp/tmp.UVCKYTAmk3 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.IWxyUc2L47 +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.UVCKYTAmk3 +++ cat /tmp/tmp.IWxyUc2L47 +++ rm /tmp/tmp.UVCKYTAmk3 /tmp/tmp.IWxyUc2L47 +++ return 0 ++ local hostname=null ++ '[' -n null -a null '!=' null ']' +++ kubectl_bin get service/monitoring-service -o json +++ jq '.status.loadBalancer.ingress[].ip' +++ sed -e 's/^"//; s/"$//;' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.7n5PtUvGzk ++++ mktemp +++ local LAST_ERR=/tmp/tmp.I8FIoKSqeF +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get service/monitoring-service 
-o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.7n5PtUvGzk +++ cat /tmp/tmp.I8FIoKSqeF +++ rm /tmp/tmp.7n5PtUvGzk /tmp/tmp.I8FIoKSqeF +++ return 0 ++ local ip=35.224.235.34 ++ '[' -n 35.224.235.34 -a 35.224.235.34 '!=' null ']' ++ echo 35.224.235.34 ++ return + local endpoint=35.224.235.34 + jq '.data.result[0].values[][1]' + curl -s -k 'https://admin:admin@35.224.235.34/graph/api/datasources/proxy/1/api/v1/query_range?query=min%28node_boot_time_seconds%7Bnode_name%3D%7E%22monitoring-2-0-24245-monitoring-rs0-1%22%7d%20or%20node_boot_time_seconds%7Bnode_name%3D%7E%22monitoring-2-0-24245-monitoring-rs0-1%22%7D%29&start=1774615243&end=1774615303&step=60' + grep '^"[0-9]' "1774608602" "1774608602" + get_metric_values mongodb_connections monitoring-2-0-24245-monitoring-rs0-1 admin:admin + local metric=mongodb_connections + local instance=monitoring-2-0-24245-monitoring-rs0-1 + local user_pass=admin:admin ++ /usr/sbin/date -u +%s -d '-1 minute' + local start=1774615245 ++ /usr/sbin/date -u +%s + local end=1774615305 ++ get_service_endpoint monitoring-service ++ local service=monitoring-service +++ kubectl_bin get service/monitoring-service -o json +++ jq '.status.loadBalancer.ingress[].hostname' +++ sed -e 's/^"//; s/"$//;' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.Q9kx5SL737 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.FktXjiiy8M +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.Q9kx5SL737 +++ cat /tmp/tmp.FktXjiiy8M +++ rm /tmp/tmp.Q9kx5SL737 /tmp/tmp.FktXjiiy8M +++ return 0 ++ local hostname=null ++ '[' -n null -a null '!=' null ']' +++ kubectl_bin get service/monitoring-service -o json +++ jq '.status.loadBalancer.ingress[].ip' +++ sed -e 's/^"//; s/"$//;' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.kPxcz7WVfx ++++ mktemp +++ local LAST_ERR=/tmp/tmp.OaikNOe3no +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.kPxcz7WVfx +++ cat /tmp/tmp.OaikNOe3no +++ rm /tmp/tmp.kPxcz7WVfx /tmp/tmp.OaikNOe3no +++ return 0 ++ local ip=35.224.235.34 ++ '[' -n 35.224.235.34 -a 35.224.235.34 '!=' null ']' ++ echo 35.224.235.34 ++ return + local endpoint=35.224.235.34 + curl -s -k 'https://admin:admin@35.224.235.34/graph/api/datasources/proxy/1/api/v1/query_range?query=min%28mongodb_connections%7Bnode_name%3D%7E%22monitoring-2-0-24245-monitoring-rs0-1%22%7d%20or%20mongodb_connections%7Bnode_name%3D%7E%22monitoring-2-0-24245-monitoring-rs0-1%22%7D%29&start=1774615245&end=1774615305&step=60' + jq '.data.result[0].values[][1]' + grep '^"[0-9]' "0" "0" + desc 'check mongo config metrics' + set +o xtrace ----------------------------------------------------------------------------------- check mongo config metrics ----------------------------------------------------------------------------------- + get_metric_values node_boot_time_seconds monitoring-2-0-24245-monitoring-cfg-1 admin:admin + local metric=node_boot_time_seconds + local instance=monitoring-2-0-24245-monitoring-cfg-1 + local user_pass=admin:admin ++ /usr/sbin/date -u +%s -d '-1 minute' + local start=1774615248 ++ /usr/sbin/date -u +%s + local end=1774615308 ++ get_service_endpoint monitoring-service ++ local service=monitoring-service +++ kubectl_bin get 
service/monitoring-service -o json +++ jq '.status.loadBalancer.ingress[].hostname' +++ sed -e 's/^"//; s/"$//;' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.IbZBcA6RzQ ++++ mktemp +++ local LAST_ERR=/tmp/tmp.STlfeKG0UN +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.IbZBcA6RzQ +++ cat /tmp/tmp.STlfeKG0UN +++ rm /tmp/tmp.IbZBcA6RzQ /tmp/tmp.STlfeKG0UN +++ return 0 ++ local hostname=null ++ '[' -n null -a null '!=' null ']' +++ kubectl_bin get service/monitoring-service -o json +++ jq '.status.loadBalancer.ingress[].ip' ++++ mktemp +++ sed -e 's/^"//; s/"$//;' +++ local LAST_OUT=/tmp/tmp.Mbt1lfMoGf ++++ mktemp +++ local LAST_ERR=/tmp/tmp.em9IFI6C6X +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.Mbt1lfMoGf +++ cat /tmp/tmp.em9IFI6C6X +++ rm /tmp/tmp.Mbt1lfMoGf /tmp/tmp.em9IFI6C6X +++ return 0 ++ local ip=35.224.235.34 ++ '[' -n 35.224.235.34 -a 35.224.235.34 '!=' null ']' ++ echo 35.224.235.34 ++ return + local endpoint=35.224.235.34 + curl -s -k 'https://admin:admin@35.224.235.34/graph/api/datasources/proxy/1/api/v1/query_range?query=min%28node_boot_time_seconds%7Bnode_name%3D%7E%22monitoring-2-0-24245-monitoring-cfg-1%22%7d%20or%20node_boot_time_seconds%7Bnode_name%3D%7E%22monitoring-2-0-24245-monitoring-cfg-1%22%7D%29&start=1774615248&end=1774615308&step=60' + jq '.data.result[0].values[][1]' + grep '^"[0-9]' "1774612861" "1774612861" + get_metric_values mongodb_connections monitoring-2-0-24245-monitoring-cfg-1 admin:admin + local metric=mongodb_connections + local instance=monitoring-2-0-24245-monitoring-cfg-1 + local user_pass=admin:admin ++ /usr/sbin/date -u +%s -d '-1 minute' + local start=1774615249 ++ /usr/sbin/date -u +%s + local end=1774615309 ++ get_service_endpoint monitoring-service ++ local service=monitoring-service +++ kubectl_bin get service/monitoring-service -o json +++ sed -e 's/^"//; s/"$//;' +++ jq '.status.loadBalancer.ingress[].hostname' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.nIUAdaKUoO ++++ mktemp +++ local LAST_ERR=/tmp/tmp.Yj2ewL34wE +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.nIUAdaKUoO +++ cat /tmp/tmp.Yj2ewL34wE +++ rm /tmp/tmp.nIUAdaKUoO /tmp/tmp.Yj2ewL34wE +++ return 0 ++ local hostname=null ++ '[' -n null -a null '!=' null ']' +++ kubectl_bin get service/monitoring-service -o json +++ jq '.status.loadBalancer.ingress[].ip' ++++ mktemp +++ sed -e 's/^"//; s/"$//;' +++ local LAST_OUT=/tmp/tmp.r8Tuqovaq6 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.APf2Fq2JLM +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.r8Tuqovaq6 +++ cat /tmp/tmp.APf2Fq2JLM +++ rm /tmp/tmp.r8Tuqovaq6 /tmp/tmp.APf2Fq2JLM +++ return 0 ++ local ip=35.224.235.34 ++ '[' -n 35.224.235.34 -a 35.224.235.34 '!=' null ']' ++ echo 35.224.235.34 ++ return + local endpoint=35.224.235.34 + curl -s -k 
'https://admin:admin@35.224.235.34/graph/api/datasources/proxy/1/api/v1/query_range?query=min%28mongodb_connections%7Bnode_name%3D%7E%22monitoring-2-0-24245-monitoring-cfg-1%22%7d%20or%20mongodb_connections%7Bnode_name%3D%7E%22monitoring-2-0-24245-monitoring-cfg-1%22%7D%29&start=1774615249&end=1774615309&step=60' + jq '.data.result[0].values[][1]' + grep '^"[0-9]' "0" "0" + desc 'check mongos metrics' + set +o xtrace ----------------------------------------------------------------------------------- check mongos metrics ----------------------------------------------------------------------------------- ++ kubectl get pod -l app.kubernetes.io/component=mongos -o 'jsonpath={.items[0].metadata.name}' + MONGOS_POD_NAME=monitoring-mongos-0 + get_metric_values node_boot_time_seconds monitoring-2-0-24245-monitoring-mongos-0 admin:admin + local metric=node_boot_time_seconds + local instance=monitoring-2-0-24245-monitoring-mongos-0 + local user_pass=admin:admin ++ /usr/sbin/date -u +%s -d '-1 minute' + local start=1774615253 ++ /usr/sbin/date -u +%s + local end=1774615313 ++ get_service_endpoint monitoring-service ++ local service=monitoring-service +++ kubectl_bin get service/monitoring-service -o json +++ jq '.status.loadBalancer.ingress[].hostname' +++ sed -e 's/^"//; s/"$//;' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.eQfvkSp089 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.9mqI33mbgy +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.eQfvkSp089 +++ cat /tmp/tmp.9mqI33mbgy +++ rm /tmp/tmp.eQfvkSp089 /tmp/tmp.9mqI33mbgy +++ return 0 ++ local hostname=null ++ '[' -n null -a null '!=' null ']' +++ kubectl_bin get service/monitoring-service -o json +++ jq '.status.loadBalancer.ingress[].ip' +++ sed -e 's/^"//; s/"$//;' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.gyXIi7nJFx ++++ mktemp +++ local LAST_ERR=/tmp/tmp.m0Iue4uqyp +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.gyXIi7nJFx +++ cat /tmp/tmp.m0Iue4uqyp +++ rm /tmp/tmp.gyXIi7nJFx /tmp/tmp.m0Iue4uqyp +++ return 0 ++ local ip=35.224.235.34 ++ '[' -n 35.224.235.34 -a 35.224.235.34 '!=' null ']' ++ echo 35.224.235.34 ++ return + local endpoint=35.224.235.34 + curl -s -k 'https://admin:admin@35.224.235.34/graph/api/datasources/proxy/1/api/v1/query_range?query=min%28node_boot_time_seconds%7Bnode_name%3D%7E%22monitoring-2-0-24245-monitoring-mongos-0%22%7d%20or%20node_boot_time_seconds%7Bnode_name%3D%7E%22monitoring-2-0-24245-monitoring-mongos-0%22%7D%29&start=1774615253&end=1774615313&step=60' + jq '.data.result[0].values[][1]' + grep '^"[0-9]' "1774608602" "1774608602" + sleep 90 + desc 'check QAN data' + set +o xtrace ----------------------------------------------------------------------------------- check QAN data ----------------------------------------------------------------------------------- + get_qan_values mongodb dev-mongod admin:admin + local service_type=mongodb + local environment=dev-mongod + local user_pass=admin:admin + local start + local end + local endpoint ++ /usr/sbin/date -u -d '-12 hour' +%Y-%m-%dT%H:%M:%S%:z + start=2026-03-27T00:43:25+00:00 ++ /usr/sbin/date -u +%Y-%m-%dT%H:%M:%S%:z + end=2026-03-27T12:43:25+00:00 ++ get_service_endpoint monitoring-service ++ local 
service=monitoring-service +++ kubectl_bin get service/monitoring-service -o json +++ jq '.status.loadBalancer.ingress[].hostname' +++ sed -e 's/^"//; s/"$//;' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.wPlDRTAI84 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.djohx6UQxn +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.wPlDRTAI84 +++ cat /tmp/tmp.djohx6UQxn +++ rm /tmp/tmp.wPlDRTAI84 /tmp/tmp.djohx6UQxn +++ return 0 ++ local hostname=null ++ '[' -n null -a null '!=' null ']' +++ kubectl_bin get service/monitoring-service -o json +++ jq '.status.loadBalancer.ingress[].ip' +++ sed -e 's/^"//; s/"$//;' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.xbduPPkEet ++++ mktemp +++ local LAST_ERR=/tmp/tmp.tncDjV9QOH +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.xbduPPkEet +++ cat /tmp/tmp.tncDjV9QOH +++ rm /tmp/tmp.xbduPPkEet /tmp/tmp.tncDjV9QOH +++ return 0 ++ local ip=35.224.235.34 ++ '[' -n 35.224.235.34 -a 35.224.235.34 '!=' null ']' ++ echo 35.224.235.34 ++ return + endpoint=35.224.235.34 + cat + local response + retry=0 ++ curl -s -k -XPOST -d @payload.json https://admin:admin@35.224.235.34/v0/qan/GetReport ++ jq '.rows[].sparkline' + [[ [ { "time_frame": 360, "timestamp": "2026-03-27T12:43:00Z" }, { "point": 1, "time_frame": 360, "timestamp": "2026-03-27T12:37:00Z" }, { "point": 2, "time_frame": 360, "timestamp": "2026-03-27T12:31:00Z" }, ... { "point": 119, "time_frame": 360, "timestamp": "2026-03-27T00:49:00Z" } ] (three near-identical sparkline arrays elided: each holds 120 six-minute buckets, "time_frame": 360, with timestamps descending from 2026-03-27T12:43:00Z to 2026-03-27T00:49:00Z; the log breaks off inside the third array at "point": 80)
"time_frame": 360, "timestamp": "2026-03-27T04:43:00Z" }, { "point": 81, "time_frame": 360, "timestamp": "2026-03-27T04:37:00Z" }, { "point": 82, "time_frame": 360, "timestamp": "2026-03-27T04:31:00Z" }, { "point": 83, "time_frame": 360, "timestamp": "2026-03-27T04:25:00Z" }, { "point": 84, "time_frame": 360, "timestamp": "2026-03-27T04:19:00Z" }, { "point": 85, "time_frame": 360, "timestamp": "2026-03-27T04:13:00Z" }, { "point": 86, "time_frame": 360, "timestamp": "2026-03-27T04:07:00Z" }, { "point": 87, "time_frame": 360, "timestamp": "2026-03-27T04:01:00Z" }, { "point": 88, "time_frame": 360, "timestamp": "2026-03-27T03:55:00Z" }, { "point": 89, "time_frame": 360, "timestamp": "2026-03-27T03:49:00Z" }, { "point": 90, "time_frame": 360, "timestamp": "2026-03-27T03:43:00Z" }, { "point": 91, "time_frame": 360, "timestamp": "2026-03-27T03:37:00Z" }, { "point": 92, "time_frame": 360, "timestamp": "2026-03-27T03:31:00Z" }, { "point": 93, "time_frame": 360, "timestamp": "2026-03-27T03:25:00Z" }, { "point": 94, "time_frame": 360, "timestamp": "2026-03-27T03:19:00Z" }, { "point": 95, "time_frame": 360, "timestamp": "2026-03-27T03:13:00Z" }, { "point": 96, "time_frame": 360, "timestamp": "2026-03-27T03:07:00Z" }, { "point": 97, "time_frame": 360, "timestamp": "2026-03-27T03:01:00Z" }, { "point": 98, "time_frame": 360, "timestamp": "2026-03-27T02:55:00Z" }, { "point": 99, "time_frame": 360, "timestamp": "2026-03-27T02:49:00Z" }, { "point": 100, "time_frame": 360, "timestamp": "2026-03-27T02:43:00Z" }, { "point": 101, "time_frame": 360, "timestamp": "2026-03-27T02:37:00Z" }, { "point": 102, "time_frame": 360, "timestamp": "2026-03-27T02:31:00Z" }, { "point": 103, "time_frame": 360, "timestamp": "2026-03-27T02:25:00Z" }, { "point": 104, "time_frame": 360, "timestamp": "2026-03-27T02:19:00Z" }, { "point": 105, "time_frame": 360, "timestamp": "2026-03-27T02:13:00Z" }, { "point": 106, "time_frame": 360, "timestamp": "2026-03-27T02:07:00Z" }, { "point": 107, "time_frame": 360, "timestamp": "2026-03-27T02:01:00Z" }, { "point": 108, "time_frame": 360, "timestamp": "2026-03-27T01:55:00Z" }, { "point": 109, "time_frame": 360, "timestamp": "2026-03-27T01:49:00Z" }, { "point": 110, "time_frame": 360, "timestamp": "2026-03-27T01:43:00Z" }, { "point": 111, "time_frame": 360, "timestamp": "2026-03-27T01:37:00Z" }, { "point": 112, "time_frame": 360, "timestamp": "2026-03-27T01:31:00Z" }, { "point": 113, "time_frame": 360, "timestamp": "2026-03-27T01:25:00Z" }, { "point": 114, "time_frame": 360, "timestamp": "2026-03-27T01:19:00Z" }, { "point": 115, "time_frame": 360, "timestamp": "2026-03-27T01:13:00Z" }, { "point": 116, "time_frame": 360, "timestamp": "2026-03-27T01:07:00Z" }, { "point": 117, "time_frame": 360, "timestamp": "2026-03-27T01:01:00Z" }, { "point": 118, "time_frame": 360, "timestamp": "2026-03-27T00:55:00Z" }, { "point": 119, "time_frame": 360, "timestamp": "2026-03-27T00:49:00Z" } ] [ { "time_frame": 360, "timestamp": "2026-03-27T12:43:00Z" }, { "point": 1, "time_frame": 360, "timestamp": "2026-03-27T12:37:00Z" }, { "point": 2, "time_frame": 360, "timestamp": "2026-03-27T12:31:00Z" }, { "point": 3, "time_frame": 360, "timestamp": "2026-03-27T12:25:00Z" }, { "point": 4, "time_frame": 360, "timestamp": "2026-03-27T12:19:00Z" }, { "point": 5, "time_frame": 360, "timestamp": "2026-03-27T12:13:00Z" }, { "point": 6, "time_frame": 360, "timestamp": "2026-03-27T12:07:00Z" }, { "point": 7, "time_frame": 360, "timestamp": "2026-03-27T12:01:00Z" }, { "point": 8, "time_frame": 360, "timestamp": 
"2026-03-27T11:55:00Z" }, { "point": 9, "time_frame": 360, "timestamp": "2026-03-27T11:49:00Z" }, { "point": 10, "time_frame": 360, "timestamp": "2026-03-27T11:43:00Z" }, { "point": 11, "time_frame": 360, "timestamp": "2026-03-27T11:37:00Z" }, { "point": 12, "time_frame": 360, "timestamp": "2026-03-27T11:31:00Z" }, { "point": 13, "time_frame": 360, "timestamp": "2026-03-27T11:25:00Z" }, { "point": 14, "time_frame": 360, "timestamp": "2026-03-27T11:19:00Z" }, { "point": 15, "time_frame": 360, "timestamp": "2026-03-27T11:13:00Z" }, { "point": 16, "time_frame": 360, "timestamp": "2026-03-27T11:07:00Z" }, { "point": 17, "time_frame": 360, "timestamp": "2026-03-27T11:01:00Z" }, { "point": 18, "time_frame": 360, "timestamp": "2026-03-27T10:55:00Z" }, { "point": 19, "time_frame": 360, "timestamp": "2026-03-27T10:49:00Z" }, { "point": 20, "time_frame": 360, "timestamp": "2026-03-27T10:43:00Z" }, { "point": 21, "time_frame": 360, "timestamp": "2026-03-27T10:37:00Z" }, { "point": 22, "time_frame": 360, "timestamp": "2026-03-27T10:31:00Z" }, { "point": 23, "time_frame": 360, "timestamp": "2026-03-27T10:25:00Z" }, { "point": 24, "time_frame": 360, "timestamp": "2026-03-27T10:19:00Z" }, { "point": 25, "time_frame": 360, "timestamp": "2026-03-27T10:13:00Z" }, { "point": 26, "time_frame": 360, "timestamp": "2026-03-27T10:07:00Z" }, { "point": 27, "time_frame": 360, "timestamp": "2026-03-27T10:01:00Z" }, { "point": 28, "time_frame": 360, "timestamp": "2026-03-27T09:55:00Z" }, { "point": 29, "time_frame": 360, "timestamp": "2026-03-27T09:49:00Z" }, { "point": 30, "time_frame": 360, "timestamp": "2026-03-27T09:43:00Z" }, { "point": 31, "time_frame": 360, "timestamp": "2026-03-27T09:37:00Z" }, { "point": 32, "time_frame": 360, "timestamp": "2026-03-27T09:31:00Z" }, { "point": 33, "time_frame": 360, "timestamp": "2026-03-27T09:25:00Z" }, { "point": 34, "time_frame": 360, "timestamp": "2026-03-27T09:19:00Z" }, { "point": 35, "time_frame": 360, "timestamp": "2026-03-27T09:13:00Z" }, { "point": 36, "time_frame": 360, "timestamp": "2026-03-27T09:07:00Z" }, { "point": 37, "time_frame": 360, "timestamp": "2026-03-27T09:01:00Z" }, { "point": 38, "time_frame": 360, "timestamp": "2026-03-27T08:55:00Z" }, { "point": 39, "time_frame": 360, "timestamp": "2026-03-27T08:49:00Z" }, { "point": 40, "time_frame": 360, "timestamp": "2026-03-27T08:43:00Z" }, { "point": 41, "time_frame": 360, "timestamp": "2026-03-27T08:37:00Z" }, { "point": 42, "time_frame": 360, "timestamp": "2026-03-27T08:31:00Z" }, { "point": 43, "time_frame": 360, "timestamp": "2026-03-27T08:25:00Z" }, { "point": 44, "time_frame": 360, "timestamp": "2026-03-27T08:19:00Z" }, { "point": 45, "time_frame": 360, "timestamp": "2026-03-27T08:13:00Z" }, { "point": 46, "time_frame": 360, "timestamp": "2026-03-27T08:07:00Z" }, { "point": 47, "time_frame": 360, "timestamp": "2026-03-27T08:01:00Z" }, { "point": 48, "time_frame": 360, "timestamp": "2026-03-27T07:55:00Z" }, { "point": 49, "time_frame": 360, "timestamp": "2026-03-27T07:49:00Z" }, { "point": 50, "time_frame": 360, "timestamp": "2026-03-27T07:43:00Z" }, { "point": 51, "time_frame": 360, "timestamp": "2026-03-27T07:37:00Z" }, { "point": 52, "time_frame": 360, "timestamp": "2026-03-27T07:31:00Z" }, { "point": 53, "time_frame": 360, "timestamp": "2026-03-27T07:25:00Z" }, { "point": 54, "time_frame": 360, "timestamp": "2026-03-27T07:19:00Z" }, { "point": 55, "time_frame": 360, "timestamp": "2026-03-27T07:13:00Z" }, { "point": 56, "time_frame": 360, "timestamp": "2026-03-27T07:07:00Z" }, { "point": 57, 
"time_frame": 360, "timestamp": "2026-03-27T07:01:00Z" }, { "point": 58, "time_frame": 360, "timestamp": "2026-03-27T06:55:00Z" }, { "point": 59, "time_frame": 360, "timestamp": "2026-03-27T06:49:00Z" }, { "point": 60, "time_frame": 360, "timestamp": "2026-03-27T06:43:00Z" }, { "point": 61, "time_frame": 360, "timestamp": "2026-03-27T06:37:00Z" }, { "point": 62, "time_frame": 360, "timestamp": "2026-03-27T06:31:00Z" }, { "point": 63, "time_frame": 360, "timestamp": "2026-03-27T06:25:00Z" }, { "point": 64, "time_frame": 360, "timestamp": "2026-03-27T06:19:00Z" }, { "point": 65, "time_frame": 360, "timestamp": "2026-03-27T06:13:00Z" }, { "point": 66, "time_frame": 360, "timestamp": "2026-03-27T06:07:00Z" }, { "point": 67, "time_frame": 360, "timestamp": "2026-03-27T06:01:00Z" }, { "point": 68, "time_frame": 360, "timestamp": "2026-03-27T05:55:00Z" }, { "point": 69, "time_frame": 360, "timestamp": "2026-03-27T05:49:00Z" }, { "point": 70, "time_frame": 360, "timestamp": "2026-03-27T05:43:00Z" }, { "point": 71, "time_frame": 360, "timestamp": "2026-03-27T05:37:00Z" }, { "point": 72, "time_frame": 360, "timestamp": "2026-03-27T05:31:00Z" }, { "point": 73, "time_frame": 360, "timestamp": "2026-03-27T05:25:00Z" }, { "point": 74, "time_frame": 360, "timestamp": "2026-03-27T05:19:00Z" }, { "point": 75, "time_frame": 360, "timestamp": "2026-03-27T05:13:00Z" }, { "point": 76, "time_frame": 360, "timestamp": "2026-03-27T05:07:00Z" }, { "point": 77, "time_frame": 360, "timestamp": "2026-03-27T05:01:00Z" }, { "point": 78, "time_frame": 360, "timestamp": "2026-03-27T04:55:00Z" }, { "point": 79, "time_frame": 360, "timestamp": "2026-03-27T04:49:00Z" }, { "point": 80, "time_frame": 360, "timestamp": "2026-03-27T04:43:00Z" }, { "point": 81, "time_frame": 360, "timestamp": "2026-03-27T04:37:00Z" }, { "point": 82, "time_frame": 360, "timestamp": "2026-03-27T04:31:00Z" }, { "point": 83, "time_frame": 360, "timestamp": "2026-03-27T04:25:00Z" }, { "point": 84, "time_frame": 360, "timestamp": "2026-03-27T04:19:00Z" }, { "point": 85, "time_frame": 360, "timestamp": "2026-03-27T04:13:00Z" }, { "point": 86, "time_frame": 360, "timestamp": "2026-03-27T04:07:00Z" }, { "point": 87, "time_frame": 360, "timestamp": "2026-03-27T04:01:00Z" }, { "point": 88, "time_frame": 360, "timestamp": "2026-03-27T03:55:00Z" }, { "point": 89, "time_frame": 360, "timestamp": "2026-03-27T03:49:00Z" }, { "point": 90, "time_frame": 360, "timestamp": "2026-03-27T03:43:00Z" }, { "point": 91, "time_frame": 360, "timestamp": "2026-03-27T03:37:00Z" }, { "point": 92, "time_frame": 360, "timestamp": "2026-03-27T03:31:00Z" }, { "point": 93, "time_frame": 360, "timestamp": "2026-03-27T03:25:00Z" }, { "point": 94, "time_frame": 360, "timestamp": "2026-03-27T03:19:00Z" }, { "point": 95, "time_frame": 360, "timestamp": "2026-03-27T03:13:00Z" }, { "point": 96, "time_frame": 360, "timestamp": "2026-03-27T03:07:00Z" }, { "point": 97, "time_frame": 360, "timestamp": "2026-03-27T03:01:00Z" }, { "point": 98, "time_frame": 360, "timestamp": "2026-03-27T02:55:00Z" }, { "point": 99, "time_frame": 360, "timestamp": "2026-03-27T02:49:00Z" }, { "point": 100, "time_frame": 360, "timestamp": "2026-03-27T02:43:00Z" }, { "point": 101, "time_frame": 360, "timestamp": "2026-03-27T02:37:00Z" }, { "point": 102, "time_frame": 360, "timestamp": "2026-03-27T02:31:00Z" }, { "point": 103, "time_frame": 360, "timestamp": "2026-03-27T02:25:00Z" }, { "point": 104, "time_frame": 360, "timestamp": "2026-03-27T02:19:00Z" }, { "point": 105, "time_frame": 360, "timestamp": 
"2026-03-27T02:13:00Z" }, { "point": 106, "time_frame": 360, "timestamp": "2026-03-27T02:07:00Z" }, { "point": 107, "time_frame": 360, "timestamp": "2026-03-27T02:01:00Z" }, { "point": 108, "time_frame": 360, "timestamp": "2026-03-27T01:55:00Z" }, { "point": 109, "time_frame": 360, "timestamp": "2026-03-27T01:49:00Z" }, { "point": 110, "time_frame": 360, "timestamp": "2026-03-27T01:43:00Z" }, { "point": 111, "time_frame": 360, "timestamp": "2026-03-27T01:37:00Z" }, { "point": 112, "time_frame": 360, "timestamp": "2026-03-27T01:31:00Z" }, { "point": 113, "time_frame": 360, "timestamp": "2026-03-27T01:25:00Z" }, { "point": 114, "time_frame": 360, "timestamp": "2026-03-27T01:19:00Z" }, { "point": 115, "time_frame": 360, "timestamp": "2026-03-27T01:13:00Z" }, { "point": 116, "time_frame": 360, "timestamp": "2026-03-27T01:07:00Z" }, { "point": 117, "time_frame": 360, "timestamp": "2026-03-27T01:01:00Z" }, { "point": 118, "time_frame": 360, "timestamp": "2026-03-27T00:55:00Z" }, { "point": 119, "time_frame": 360, "timestamp": "2026-03-27T00:49:00Z" } ] != \n\u\l\l ]] + rm -f payload.json + get_qan_values mongodb dev-mongos admin:admin + local service_type=mongodb + local environment=dev-mongos + local user_pass=admin:admin + local start + local end + local endpoint ++ /usr/sbin/date -u -d '-12 hour' +%Y-%m-%dT%H:%M:%S%:z + start=2026-03-27T00:43:28+00:00 ++ /usr/sbin/date -u +%Y-%m-%dT%H:%M:%S%:z + end=2026-03-27T12:43:28+00:00 ++ get_service_endpoint monitoring-service ++ local service=monitoring-service +++ kubectl_bin get service/monitoring-service -o json +++ jq '.status.loadBalancer.ingress[].hostname' +++ sed -e 's/^"//; s/"$//;' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.PF8GsoispB ++++ mktemp +++ local LAST_ERR=/tmp/tmp.FytA1RS2yd +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.PF8GsoispB +++ cat /tmp/tmp.FytA1RS2yd +++ rm /tmp/tmp.PF8GsoispB /tmp/tmp.FytA1RS2yd +++ return 0 ++ local hostname=null ++ '[' -n null -a null '!=' null ']' +++ kubectl_bin get service/monitoring-service -o json +++ jq '.status.loadBalancer.ingress[].ip' +++ sed -e 's/^"//; s/"$//;' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.3RMpHYKJI5 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.Kw2faZNGvk +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.3RMpHYKJI5 +++ cat /tmp/tmp.Kw2faZNGvk +++ rm /tmp/tmp.3RMpHYKJI5 /tmp/tmp.Kw2faZNGvk +++ return 0 ++ local ip=35.224.235.34 ++ '[' -n 35.224.235.34 -a 35.224.235.34 '!=' null ']' ++ echo 35.224.235.34 ++ return + endpoint=35.224.235.34 + cat + local response + retry=0 ++ curl -s -k -XPOST -d @payload.json https://admin:admin@35.224.235.34/v0/qan/GetReport ++ jq '.rows[].sparkline' + [[ [ { "time_frame": 360, "timestamp": "2026-03-27T12:43:00Z" }, { "point": 1, "time_frame": 360, "timestamp": "2026-03-27T12:37:00Z" }, { "point": 2, "time_frame": 360, "timestamp": "2026-03-27T12:31:00Z" }, { "point": 3, "time_frame": 360, "timestamp": "2026-03-27T12:25:00Z" }, { "point": 4, "time_frame": 360, "timestamp": "2026-03-27T12:19:00Z" }, { "point": 5, "time_frame": 360, "timestamp": "2026-03-27T12:13:00Z" }, { "point": 6, "time_frame": 360, "timestamp": "2026-03-27T12:07:00Z" }, { "point": 7, "time_frame": 360, "timestamp": 
"2026-03-27T12:01:00Z" }, { "point": 8, "time_frame": 360, "timestamp": "2026-03-27T11:55:00Z" }, { "point": 9, "time_frame": 360, "timestamp": "2026-03-27T11:49:00Z" }, { "point": 10, "time_frame": 360, "timestamp": "2026-03-27T11:43:00Z" }, { "point": 11, "time_frame": 360, "timestamp": "2026-03-27T11:37:00Z" }, { "point": 12, "time_frame": 360, "timestamp": "2026-03-27T11:31:00Z" }, { "point": 13, "time_frame": 360, "timestamp": "2026-03-27T11:25:00Z" }, { "point": 14, "time_frame": 360, "timestamp": "2026-03-27T11:19:00Z" }, { "point": 15, "time_frame": 360, "timestamp": "2026-03-27T11:13:00Z" }, { "point": 16, "time_frame": 360, "timestamp": "2026-03-27T11:07:00Z" }, { "point": 17, "time_frame": 360, "timestamp": "2026-03-27T11:01:00Z" }, { "point": 18, "time_frame": 360, "timestamp": "2026-03-27T10:55:00Z" }, { "point": 19, "time_frame": 360, "timestamp": "2026-03-27T10:49:00Z" }, { "point": 20, "time_frame": 360, "timestamp": "2026-03-27T10:43:00Z" }, { "point": 21, "time_frame": 360, "timestamp": "2026-03-27T10:37:00Z" }, { "point": 22, "time_frame": 360, "timestamp": "2026-03-27T10:31:00Z" }, { "point": 23, "time_frame": 360, "timestamp": "2026-03-27T10:25:00Z" }, { "point": 24, "time_frame": 360, "timestamp": "2026-03-27T10:19:00Z" }, { "point": 25, "time_frame": 360, "timestamp": "2026-03-27T10:13:00Z" }, { "point": 26, "time_frame": 360, "timestamp": "2026-03-27T10:07:00Z" }, { "point": 27, "time_frame": 360, "timestamp": "2026-03-27T10:01:00Z" }, { "point": 28, "time_frame": 360, "timestamp": "2026-03-27T09:55:00Z" }, { "point": 29, "time_frame": 360, "timestamp": "2026-03-27T09:49:00Z" }, { "point": 30, "time_frame": 360, "timestamp": "2026-03-27T09:43:00Z" }, { "point": 31, "time_frame": 360, "timestamp": "2026-03-27T09:37:00Z" }, { "point": 32, "time_frame": 360, "timestamp": "2026-03-27T09:31:00Z" }, { "point": 33, "time_frame": 360, "timestamp": "2026-03-27T09:25:00Z" }, { "point": 34, "time_frame": 360, "timestamp": "2026-03-27T09:19:00Z" }, { "point": 35, "time_frame": 360, "timestamp": "2026-03-27T09:13:00Z" }, { "point": 36, "time_frame": 360, "timestamp": "2026-03-27T09:07:00Z" }, { "point": 37, "time_frame": 360, "timestamp": "2026-03-27T09:01:00Z" }, { "point": 38, "time_frame": 360, "timestamp": "2026-03-27T08:55:00Z" }, { "point": 39, "time_frame": 360, "timestamp": "2026-03-27T08:49:00Z" }, { "point": 40, "time_frame": 360, "timestamp": "2026-03-27T08:43:00Z" }, { "point": 41, "time_frame": 360, "timestamp": "2026-03-27T08:37:00Z" }, { "point": 42, "time_frame": 360, "timestamp": "2026-03-27T08:31:00Z" }, { "point": 43, "time_frame": 360, "timestamp": "2026-03-27T08:25:00Z" }, { "point": 44, "time_frame": 360, "timestamp": "2026-03-27T08:19:00Z" }, { "point": 45, "time_frame": 360, "timestamp": "2026-03-27T08:13:00Z" }, { "point": 46, "time_frame": 360, "timestamp": "2026-03-27T08:07:00Z" }, { "point": 47, "time_frame": 360, "timestamp": "2026-03-27T08:01:00Z" }, { "point": 48, "time_frame": 360, "timestamp": "2026-03-27T07:55:00Z" }, { "point": 49, "time_frame": 360, "timestamp": "2026-03-27T07:49:00Z" }, { "point": 50, "time_frame": 360, "timestamp": "2026-03-27T07:43:00Z" }, { "point": 51, "time_frame": 360, "timestamp": "2026-03-27T07:37:00Z" }, { "point": 52, "time_frame": 360, "timestamp": "2026-03-27T07:31:00Z" }, { "point": 53, "time_frame": 360, "timestamp": "2026-03-27T07:25:00Z" }, { "point": 54, "time_frame": 360, "timestamp": "2026-03-27T07:19:00Z" }, { "point": 55, "time_frame": 360, "timestamp": "2026-03-27T07:13:00Z" }, { "point": 56, 
"time_frame": 360, "timestamp": "2026-03-27T07:07:00Z" }, { "point": 57, "time_frame": 360, "timestamp": "2026-03-27T07:01:00Z" }, { "point": 58, "time_frame": 360, "timestamp": "2026-03-27T06:55:00Z" }, { "point": 59, "time_frame": 360, "timestamp": "2026-03-27T06:49:00Z" }, { "point": 60, "time_frame": 360, "timestamp": "2026-03-27T06:43:00Z" }, { "point": 61, "time_frame": 360, "timestamp": "2026-03-27T06:37:00Z" }, { "point": 62, "time_frame": 360, "timestamp": "2026-03-27T06:31:00Z" }, { "point": 63, "time_frame": 360, "timestamp": "2026-03-27T06:25:00Z" }, { "point": 64, "time_frame": 360, "timestamp": "2026-03-27T06:19:00Z" }, { "point": 65, "time_frame": 360, "timestamp": "2026-03-27T06:13:00Z" }, { "point": 66, "time_frame": 360, "timestamp": "2026-03-27T06:07:00Z" }, { "point": 67, "time_frame": 360, "timestamp": "2026-03-27T06:01:00Z" }, { "point": 68, "time_frame": 360, "timestamp": "2026-03-27T05:55:00Z" }, { "point": 69, "time_frame": 360, "timestamp": "2026-03-27T05:49:00Z" }, { "point": 70, "time_frame": 360, "timestamp": "2026-03-27T05:43:00Z" }, { "point": 71, "time_frame": 360, "timestamp": "2026-03-27T05:37:00Z" }, { "point": 72, "time_frame": 360, "timestamp": "2026-03-27T05:31:00Z" }, { "point": 73, "time_frame": 360, "timestamp": "2026-03-27T05:25:00Z" }, { "point": 74, "time_frame": 360, "timestamp": "2026-03-27T05:19:00Z" }, { "point": 75, "time_frame": 360, "timestamp": "2026-03-27T05:13:00Z" }, { "point": 76, "time_frame": 360, "timestamp": "2026-03-27T05:07:00Z" }, { "point": 77, "time_frame": 360, "timestamp": "2026-03-27T05:01:00Z" }, { "point": 78, "time_frame": 360, "timestamp": "2026-03-27T04:55:00Z" }, { "point": 79, "time_frame": 360, "timestamp": "2026-03-27T04:49:00Z" }, { "point": 80, "time_frame": 360, "timestamp": "2026-03-27T04:43:00Z" }, { "point": 81, "time_frame": 360, "timestamp": "2026-03-27T04:37:00Z" }, { "point": 82, "time_frame": 360, "timestamp": "2026-03-27T04:31:00Z" }, { "point": 83, "time_frame": 360, "timestamp": "2026-03-27T04:25:00Z" }, { "point": 84, "time_frame": 360, "timestamp": "2026-03-27T04:19:00Z" }, { "point": 85, "time_frame": 360, "timestamp": "2026-03-27T04:13:00Z" }, { "point": 86, "time_frame": 360, "timestamp": "2026-03-27T04:07:00Z" }, { "point": 87, "time_frame": 360, "timestamp": "2026-03-27T04:01:00Z" }, { "point": 88, "time_frame": 360, "timestamp": "2026-03-27T03:55:00Z" }, { "point": 89, "time_frame": 360, "timestamp": "2026-03-27T03:49:00Z" }, { "point": 90, "time_frame": 360, "timestamp": "2026-03-27T03:43:00Z" }, { "point": 91, "time_frame": 360, "timestamp": "2026-03-27T03:37:00Z" }, { "point": 92, "time_frame": 360, "timestamp": "2026-03-27T03:31:00Z" }, { "point": 93, "time_frame": 360, "timestamp": "2026-03-27T03:25:00Z" }, { "point": 94, "time_frame": 360, "timestamp": "2026-03-27T03:19:00Z" }, { "point": 95, "time_frame": 360, "timestamp": "2026-03-27T03:13:00Z" }, { "point": 96, "time_frame": 360, "timestamp": "2026-03-27T03:07:00Z" }, { "point": 97, "time_frame": 360, "timestamp": "2026-03-27T03:01:00Z" }, { "point": 98, "time_frame": 360, "timestamp": "2026-03-27T02:55:00Z" }, { "point": 99, "time_frame": 360, "timestamp": "2026-03-27T02:49:00Z" }, { "point": 100, "time_frame": 360, "timestamp": "2026-03-27T02:43:00Z" }, { "point": 101, "time_frame": 360, "timestamp": "2026-03-27T02:37:00Z" }, { "point": 102, "time_frame": 360, "timestamp": "2026-03-27T02:31:00Z" }, { "point": 103, "time_frame": 360, "timestamp": "2026-03-27T02:25:00Z" }, { "point": 104, "time_frame": 360, "timestamp": 
"2026-03-27T02:19:00Z" }, { "point": 105, "time_frame": 360, "timestamp": "2026-03-27T02:13:00Z" }, { "point": 106, "time_frame": 360, "timestamp": "2026-03-27T02:07:00Z" }, { "point": 107, "time_frame": 360, "timestamp": "2026-03-27T02:01:00Z" }, { "point": 108, "time_frame": 360, "timestamp": "2026-03-27T01:55:00Z" }, { "point": 109, "time_frame": 360, "timestamp": "2026-03-27T01:49:00Z" }, { "point": 110, "time_frame": 360, "timestamp": "2026-03-27T01:43:00Z" }, { "point": 111, "time_frame": 360, "timestamp": "2026-03-27T01:37:00Z" }, { "point": 112, "time_frame": 360, "timestamp": "2026-03-27T01:31:00Z" }, { "point": 113, "time_frame": 360, "timestamp": "2026-03-27T01:25:00Z" }, { "point": 114, "time_frame": 360, "timestamp": "2026-03-27T01:19:00Z" }, { "point": 115, "time_frame": 360, "timestamp": "2026-03-27T01:13:00Z" }, { "point": 116, "time_frame": 360, "timestamp": "2026-03-27T01:07:00Z" }, { "point": 117, "time_frame": 360, "timestamp": "2026-03-27T01:01:00Z" }, { "point": 118, "time_frame": 360, "timestamp": "2026-03-27T00:55:00Z" }, { "point": 119, "time_frame": 360, "timestamp": "2026-03-27T00:49:00Z" } ] != \n\u\l\l ]] + rm -f payload.json + nodeList=($(get_node_id_from_pmm)) ++ get_node_id_from_pmm ++ nodeList=() ++ local -a nodeList +++ kubectl_bin get pods --no-headers -l app.kubernetes.io/name=percona-server-mongodb --output=custom-columns=NAME:.metadata.name ++++ mktemp +++ local LAST_OUT=/tmp/tmp.rqPW6QZwZt ++++ mktemp +++ local LAST_ERR=/tmp/tmp.IFfMzdSvCy +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get pods --no-headers -l app.kubernetes.io/name=percona-server-mongodb --output=custom-columns=NAME:.metadata.name +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.rqPW6QZwZt +++ cat /tmp/tmp.IFfMzdSvCy +++ rm /tmp/tmp.rqPW6QZwZt /tmp/tmp.IFfMzdSvCy +++ return 0 ++ for instance in $(kubectl_bin get pods --no-headers -l app.kubernetes.io/name=percona-server-mongodb --output=custom-columns='NAME:.metadata.name') ++ nodeList+=($(kubectl_bin exec -n "$namespace" $instance -c pmm-client -- pmm-admin status --json | jq -r '.pmm_agent_status.node_id')) +++ kubectl_bin exec -n monitoring-2-0-24245 monitoring-cfg-0 -c pmm-client -- pmm-admin status --json +++ jq -r .pmm_agent_status.node_id ++++ mktemp +++ local LAST_OUT=/tmp/tmp.kr9Pr2vkAA ++++ mktemp +++ local LAST_ERR=/tmp/tmp.zaJbVhjI4J +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec -n monitoring-2-0-24245 monitoring-cfg-0 -c pmm-client -- pmm-admin status --json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.kr9Pr2vkAA +++ cat /tmp/tmp.zaJbVhjI4J +++ rm /tmp/tmp.kr9Pr2vkAA /tmp/tmp.zaJbVhjI4J +++ return 0 ++ for instance in $(kubectl_bin get pods --no-headers -l app.kubernetes.io/name=percona-server-mongodb --output=custom-columns='NAME:.metadata.name') ++ nodeList+=($(kubectl_bin exec -n "$namespace" $instance -c pmm-client -- pmm-admin status --json | jq -r '.pmm_agent_status.node_id')) +++ kubectl_bin exec -n monitoring-2-0-24245 monitoring-cfg-1 -c pmm-client -- pmm-admin status --json +++ jq -r .pmm_agent_status.node_id ++++ mktemp +++ local LAST_OUT=/tmp/tmp.0k17jk9cZ6 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.RAOi11ELpE +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec -n monitoring-2-0-24245 monitoring-cfg-1 -c pmm-client -- pmm-admin status --json +++ exit_status=0 +++ 
set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.0k17jk9cZ6 +++ cat /tmp/tmp.RAOi11ELpE +++ rm /tmp/tmp.0k17jk9cZ6 /tmp/tmp.RAOi11ELpE +++ return 0 ++ for instance in $(kubectl_bin get pods --no-headers -l app.kubernetes.io/name=percona-server-mongodb --output=custom-columns='NAME:.metadata.name') ++ nodeList+=($(kubectl_bin exec -n "$namespace" $instance -c pmm-client -- pmm-admin status --json | jq -r '.pmm_agent_status.node_id')) +++ kubectl_bin exec -n monitoring-2-0-24245 monitoring-cfg-2 -c pmm-client -- pmm-admin status --json +++ jq -r .pmm_agent_status.node_id ++++ mktemp +++ local LAST_OUT=/tmp/tmp.J4XnbTTQQZ ++++ mktemp +++ local LAST_ERR=/tmp/tmp.cCqHAjN0aT +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec -n monitoring-2-0-24245 monitoring-cfg-2 -c pmm-client -- pmm-admin status --json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.J4XnbTTQQZ +++ cat /tmp/tmp.cCqHAjN0aT +++ rm /tmp/tmp.J4XnbTTQQZ /tmp/tmp.cCqHAjN0aT +++ return 0 ++ for instance in $(kubectl_bin get pods --no-headers -l app.kubernetes.io/name=percona-server-mongodb --output=custom-columns='NAME:.metadata.name') ++ nodeList+=($(kubectl_bin exec -n "$namespace" $instance -c pmm-client -- pmm-admin status --json | jq -r '.pmm_agent_status.node_id')) +++ kubectl_bin exec -n monitoring-2-0-24245 monitoring-mongos-0 -c pmm-client -- pmm-admin status --json +++ jq -r .pmm_agent_status.node_id ++++ mktemp +++ local LAST_OUT=/tmp/tmp.RdeSZkfhaf ++++ mktemp +++ local LAST_ERR=/tmp/tmp.yACSlZL4SI +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec -n monitoring-2-0-24245 monitoring-mongos-0 -c pmm-client -- pmm-admin status --json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.RdeSZkfhaf +++ cat /tmp/tmp.yACSlZL4SI +++ rm /tmp/tmp.RdeSZkfhaf /tmp/tmp.yACSlZL4SI +++ return 0 ++ for instance in $(kubectl_bin get pods --no-headers -l app.kubernetes.io/name=percona-server-mongodb --output=custom-columns='NAME:.metadata.name') ++ nodeList+=($(kubectl_bin exec -n "$namespace" $instance -c pmm-client -- pmm-admin status --json | jq -r '.pmm_agent_status.node_id')) +++ kubectl_bin exec -n monitoring-2-0-24245 monitoring-mongos-1 -c pmm-client -- pmm-admin status --json +++ jq -r .pmm_agent_status.node_id ++++ mktemp +++ local LAST_OUT=/tmp/tmp.4Zn3M461Sz ++++ mktemp +++ local LAST_ERR=/tmp/tmp.a18Ddeaxxg +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec -n monitoring-2-0-24245 monitoring-mongos-1 -c pmm-client -- pmm-admin status --json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.4Zn3M461Sz +++ cat /tmp/tmp.a18Ddeaxxg +++ rm /tmp/tmp.4Zn3M461Sz /tmp/tmp.a18Ddeaxxg +++ return 0 ++ for instance in $(kubectl_bin get pods --no-headers -l app.kubernetes.io/name=percona-server-mongodb --output=custom-columns='NAME:.metadata.name') ++ nodeList+=($(kubectl_bin exec -n "$namespace" $instance -c pmm-client -- pmm-admin status --json | jq -r '.pmm_agent_status.node_id')) +++ kubectl_bin exec -n monitoring-2-0-24245 monitoring-mongos-2 -c pmm-client -- pmm-admin status --json +++ jq -r .pmm_agent_status.node_id ++++ mktemp +++ local LAST_OUT=/tmp/tmp.H6dWpOglar ++++ mktemp +++ local LAST_ERR=/tmp/tmp.uSqETC8aNt +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec -n monitoring-2-0-24245 
monitoring-mongos-2 -c pmm-client -- pmm-admin status --json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.H6dWpOglar +++ cat /tmp/tmp.uSqETC8aNt +++ rm /tmp/tmp.H6dWpOglar /tmp/tmp.uSqETC8aNt +++ return 0 ++ for instance in $(kubectl_bin get pods --no-headers -l app.kubernetes.io/name=percona-server-mongodb --output=custom-columns='NAME:.metadata.name') ++ nodeList+=($(kubectl_bin exec -n "$namespace" $instance -c pmm-client -- pmm-admin status --json | jq -r '.pmm_agent_status.node_id')) +++ kubectl_bin exec -n monitoring-2-0-24245 monitoring-rs0-0 -c pmm-client -- pmm-admin status --json +++ jq -r .pmm_agent_status.node_id ++++ mktemp +++ local LAST_OUT=/tmp/tmp.yXj6bzOuiG ++++ mktemp +++ local LAST_ERR=/tmp/tmp.AgYjv00y9B +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec -n monitoring-2-0-24245 monitoring-rs0-0 -c pmm-client -- pmm-admin status --json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.yXj6bzOuiG +++ cat /tmp/tmp.AgYjv00y9B +++ rm /tmp/tmp.yXj6bzOuiG /tmp/tmp.AgYjv00y9B +++ return 0 ++ for instance in $(kubectl_bin get pods --no-headers -l app.kubernetes.io/name=percona-server-mongodb --output=custom-columns='NAME:.metadata.name') ++ nodeList+=($(kubectl_bin exec -n "$namespace" $instance -c pmm-client -- pmm-admin status --json | jq -r '.pmm_agent_status.node_id')) +++ kubectl_bin exec -n monitoring-2-0-24245 monitoring-rs0-1 -c pmm-client -- pmm-admin status --json +++ jq -r .pmm_agent_status.node_id ++++ mktemp +++ local LAST_OUT=/tmp/tmp.8lJiEqCLEE ++++ mktemp +++ local LAST_ERR=/tmp/tmp.zEUFFoO6VU +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec -n monitoring-2-0-24245 monitoring-rs0-1 -c pmm-client -- pmm-admin status --json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.8lJiEqCLEE +++ cat /tmp/tmp.zEUFFoO6VU +++ rm /tmp/tmp.8lJiEqCLEE /tmp/tmp.zEUFFoO6VU +++ return 0 ++ for instance in $(kubectl_bin get pods --no-headers -l app.kubernetes.io/name=percona-server-mongodb --output=custom-columns='NAME:.metadata.name') ++ nodeList+=($(kubectl_bin exec -n "$namespace" $instance -c pmm-client -- pmm-admin status --json | jq -r '.pmm_agent_status.node_id')) +++ kubectl_bin exec -n monitoring-2-0-24245 monitoring-rs0-2 -c pmm-client -- pmm-admin status --json +++ jq -r .pmm_agent_status.node_id ++++ mktemp +++ local LAST_OUT=/tmp/tmp.IViRC13NML ++++ mktemp +++ local LAST_ERR=/tmp/tmp.VhooI0ntZX +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec -n monitoring-2-0-24245 monitoring-rs0-2 -c pmm-client -- pmm-admin status --json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.IViRC13NML +++ cat /tmp/tmp.VhooI0ntZX +++ rm /tmp/tmp.IViRC13NML /tmp/tmp.VhooI0ntZX +++ return 0 ++ echo /node_id/8732ed7f-ecdc-41d2-8d14-a2ea205ee730 /node_id/6e04810f-ff04-4d3e-afe5-efd3ed0c2983 /node_id/b39230bf-013b-4469-8aee-b2d4bbf850cf /node_id/8d364e49-7b43-4422-a327-6699e091ef8d /node_id/98fa8c2f-d864-43fb-a7ef-eabd0e4b2a22 /node_id/4d1ae4d0-1ef0-46c8-b715-d8059e034479 /node_id/a602e4c2-e78c-4b16-afc1-e94d12ea73ca /node_id/3ad31721-00cf-4c8e-af2d-58216e8d3309 /node_id/2a1ccd92-2701-4824-9ec7-0388be9fe086 + nodeList_from_pmm=($(does_node_id_exists "${nodeList[@]}")) ++ does_node_id_exists /node_id/8732ed7f-ecdc-41d2-8d14-a2ea205ee730 
/node_id/6e04810f-ff04-4d3e-afe5-efd3ed0c2983 /node_id/b39230bf-013b-4469-8aee-b2d4bbf850cf /node_id/8d364e49-7b43-4422-a327-6699e091ef8d /node_id/98fa8c2f-d864-43fb-a7ef-eabd0e4b2a22 /node_id/4d1ae4d0-1ef0-46c8-b715-d8059e034479 /node_id/a602e4c2-e78c-4b16-afc1-e94d12ea73ca /node_id/3ad31721-00cf-4c8e-af2d-58216e8d3309 /node_id/2a1ccd92-2701-4824-9ec7-0388be9fe086 ++ nodeList=('/node_id/8732ed7f-ecdc-41d2-8d14-a2ea205ee730' '/node_id/6e04810f-ff04-4d3e-afe5-efd3ed0c2983' '/node_id/b39230bf-013b-4469-8aee-b2d4bbf850cf' '/node_id/8d364e49-7b43-4422-a327-6699e091ef8d' '/node_id/98fa8c2f-d864-43fb-a7ef-eabd0e4b2a22' '/node_id/4d1ae4d0-1ef0-46c8-b715-d8059e034479' '/node_id/a602e4c2-e78c-4b16-afc1-e94d12ea73ca' '/node_id/3ad31721-00cf-4c8e-af2d-58216e8d3309' '/node_id/2a1ccd92-2701-4824-9ec7-0388be9fe086') ++ local -a nodeList ++ nodeList_from_pmm=() ++ local -a nodeList_from_pmm ++ for node_id in "${nodeList[@]}" ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-0 -- pmm-admin --server-url=https://admin:admin@$(get_pmm_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ grep /node_id/8732ed7f-ecdc-41d2-8d14-a2ea205ee730 +++ awk '{print $4}' ++++ get_pmm_service_ip monitoring-service ++++ local service=monitoring-service ++++ grep -q NotFound ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.spec.type}' ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[]}' ++++ grep -E -q 'hostname|ip' ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.JyqEjhCt4w +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.iioVYitvc8 ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.JyqEjhCt4w ++++ cat /tmp/tmp.iioVYitvc8 ++++ rm /tmp/tmp.JyqEjhCt4w /tmp/tmp.iioVYitvc8 ++++ return 0 ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.6EbIETr7Ap +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.2xsQq85DnC ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.6EbIETr7Ap ++++ cat /tmp/tmp.2xsQq85DnC ++++ rm /tmp/tmp.6EbIETr7Ap /tmp/tmp.2xsQq85DnC ++++ return 0 +++ kubectl_bin exec -n monitoring-2-0-24245 monitoring-0 -- pmm-admin --server-url=https://admin:admin@35.224.235.34/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.kfaLg3fAIU ++++ mktemp +++ local LAST_ERR=/tmp/tmp.FUYr6crers +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec -n monitoring-2-0-24245 monitoring-0 -- pmm-admin --server-url=https://admin:admin@35.224.235.34/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.kfaLg3fAIU +++ cat /tmp/tmp.FUYr6crers +++ rm /tmp/tmp.kfaLg3fAIU /tmp/tmp.FUYr6crers +++ return 0 ++ for node_id in "${nodeList[@]}" ++ 
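
Stepping back from the per-call noise: the node-registration check running through this stretch of the trace does two passes, sketched below from the commands that are visible (namespace and endpoint stand for the values used in this run, monitoring-2-0-24245 and 35.224.235.34; the real helpers route every kubectl call through the retry wrapper whose temp files dominate this log).

    nodeList=()
    # pass 1 (get_node_id_from_pmm): ask each pmm-client sidecar which
    # node_id it registered under
    for pod in $(kubectl get pods --no-headers \
            -l app.kubernetes.io/name=percona-server-mongodb \
            --output=custom-columns=NAME:.metadata.name); do
        nodeList+=("$(kubectl exec -n "${namespace}" "${pod}" -c pmm-client -- \
            pmm-admin status --json | jq -r '.pmm_agent_status.node_id')")
    done
    # pass 2 (does_node_id_exists): each collected id must also appear in
    # the PMM server's own inventory
    for node_id in "${nodeList[@]}"; do
        kubectl exec -n "${namespace}" monitoring-0 -- \
            pmm-admin --server-url="https://admin:admin@${endpoint}/" \
            --server-insecure-tls inventory list nodes \
            --node-type=CONTAINER_NODE \
            | grep "${node_id}" | awk '{print $4}'
    done
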
nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-0 -- pmm-admin --server-url=https://admin:admin@$(get_pmm_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ grep /node_id/6e04810f-ff04-4d3e-afe5-efd3ed0c2983 ++++ get_pmm_service_ip monitoring-service ++++ local service=monitoring-service +++ awk '{print $4}' ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.spec.type}' ++++ grep -q NotFound ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[]}' ++++ grep -E -q 'hostname|ip' ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.7Ox9vP2Mox +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.Bj3RjC3vlD ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.7Ox9vP2Mox ++++ cat /tmp/tmp.Bj3RjC3vlD ++++ rm /tmp/tmp.7Ox9vP2Mox /tmp/tmp.Bj3RjC3vlD ++++ return 0 ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.WXQqiJY1Zv +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.RYU5FILsoo ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.WXQqiJY1Zv ++++ cat /tmp/tmp.RYU5FILsoo ++++ rm /tmp/tmp.WXQqiJY1Zv /tmp/tmp.RYU5FILsoo ++++ return 0 +++ kubectl_bin exec -n monitoring-2-0-24245 monitoring-0 -- pmm-admin --server-url=https://admin:admin@35.224.235.34/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.AIINVFvdQc ++++ mktemp +++ local LAST_ERR=/tmp/tmp.0ZWrS9A4fL +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec -n monitoring-2-0-24245 monitoring-0 -- pmm-admin --server-url=https://admin:admin@35.224.235.34/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.AIINVFvdQc +++ cat /tmp/tmp.0ZWrS9A4fL +++ rm /tmp/tmp.AIINVFvdQc /tmp/tmp.0ZWrS9A4fL +++ return 0 ++ for node_id in "${nodeList[@]}" ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-0 -- pmm-admin --server-url=https://admin:admin@$(get_pmm_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ grep /node_id/b39230bf-013b-4469-8aee-b2d4bbf850cf +++ awk '{print $4}' ++++ get_pmm_service_ip monitoring-service ++++ local service=monitoring-service ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.spec.type}' ++++ grep -q NotFound ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[]}' ++++ grep -E -q 'hostname|ip' ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.QJl65Bl3qX +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.goDsdKv6F7 ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) 
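
The get_pmm_service_ip helper expanded on every iteration above resolves the PMM endpoint roughly as follows. The wait loop is an assumption: the trace only shows the single probe taken when the ingress is already populated.

    get_pmm_service_ip() {
        local service=$1
        # nothing to resolve if the service is absent
        if kubectl get "service/${service}" -o 'jsonpath={.spec.type}' 2>&1 \
                | grep -q NotFound; then
            return
        fi
        # probe until the LoadBalancer ingress carries a hostname or an ip
        until kubectl get "service/${service}" \
                -o 'jsonpath={.status.loadBalancer.ingress[]}' \
                | grep -E -q 'hostname|ip'; do
            sleep 1
        done
        local ip hostname
        ip=$(kubectl get "service/${service}" \
            -o 'jsonpath={.status.loadBalancer.ingress[].ip}')
        hostname=$(kubectl get "service/${service}" \
            -o 'jsonpath={.status.loadBalancer.ingress[].hostname}')
        echo "${ip:-${hostname}}"    # in this run the ip, 35.224.235.34, wins
    }
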
++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.QJl65Bl3qX ++++ cat /tmp/tmp.goDsdKv6F7 ++++ rm /tmp/tmp.QJl65Bl3qX /tmp/tmp.goDsdKv6F7 ++++ return 0 ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.Pn4jZcXkXM +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.4p5KqxL4TT ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.Pn4jZcXkXM ++++ cat /tmp/tmp.4p5KqxL4TT ++++ rm /tmp/tmp.Pn4jZcXkXM /tmp/tmp.4p5KqxL4TT ++++ return 0 +++ kubectl_bin exec -n monitoring-2-0-24245 monitoring-0 -- pmm-admin --server-url=https://admin:admin@35.224.235.34/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.Bkg15Ku41u ++++ mktemp +++ local LAST_ERR=/tmp/tmp.XirvM0rTfZ +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec -n monitoring-2-0-24245 monitoring-0 -- pmm-admin --server-url=https://admin:admin@35.224.235.34/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.Bkg15Ku41u +++ cat /tmp/tmp.XirvM0rTfZ +++ rm /tmp/tmp.Bkg15Ku41u /tmp/tmp.XirvM0rTfZ +++ return 0 ++ for node_id in "${nodeList[@]}" ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-0 -- pmm-admin --server-url=https://admin:admin@$(get_pmm_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ awk '{print $4}' ++++ get_pmm_service_ip monitoring-service ++++ local service=monitoring-service +++ grep /node_id/8d364e49-7b43-4422-a327-6699e091ef8d ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.spec.type}' ++++ grep -q NotFound ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[]}' ++++ grep -E -q 'hostname|ip' ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.uCt0J8uCRM +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.0dlWJYF46M ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.uCt0J8uCRM ++++ cat /tmp/tmp.0dlWJYF46M ++++ rm /tmp/tmp.uCt0J8uCRM /tmp/tmp.0dlWJYF46M ++++ return 0 ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.7rTApHsYSg +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.IEfPQEUong ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.7rTApHsYSg ++++ cat /tmp/tmp.IEfPQEUong ++++ rm /tmp/tmp.7rTApHsYSg /tmp/tmp.IEfPQEUong ++++ return 0 +++ kubectl_bin exec -n 
monitoring-2-0-24245 monitoring-0 -- pmm-admin --server-url=https://admin:admin@35.224.235.34/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.SetQck7Ck6 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.nNPHDQYb19 +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec -n monitoring-2-0-24245 monitoring-0 -- pmm-admin --server-url=https://admin:admin@35.224.235.34/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.SetQck7Ck6 +++ cat /tmp/tmp.nNPHDQYb19 +++ rm /tmp/tmp.SetQck7Ck6 /tmp/tmp.nNPHDQYb19 +++ return 0 ++ for node_id in "${nodeList[@]}" ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-0 -- pmm-admin --server-url=https://admin:admin@$(get_pmm_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ grep /node_id/98fa8c2f-d864-43fb-a7ef-eabd0e4b2a22 +++ awk '{print $4}' ++++ get_pmm_service_ip monitoring-service ++++ local service=monitoring-service ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.spec.type}' ++++ grep -q NotFound ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[]}' ++++ grep -E -q 'hostname|ip' ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.oiSe2sRNzY +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.ORd2k3BIUi ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.oiSe2sRNzY ++++ cat /tmp/tmp.ORd2k3BIUi ++++ rm /tmp/tmp.oiSe2sRNzY /tmp/tmp.ORd2k3BIUi ++++ return 0 ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.THrRwLFnrW +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.5VsBWjUerz ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.THrRwLFnrW ++++ cat /tmp/tmp.5VsBWjUerz ++++ rm /tmp/tmp.THrRwLFnrW /tmp/tmp.5VsBWjUerz ++++ return 0 +++ kubectl_bin exec -n monitoring-2-0-24245 monitoring-0 -- pmm-admin --server-url=https://admin:admin@35.224.235.34/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.8jIPTHH5rW ++++ mktemp +++ local LAST_ERR=/tmp/tmp.eGkuU48pwf +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec -n monitoring-2-0-24245 monitoring-0 -- pmm-admin --server-url=https://admin:admin@35.224.235.34/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.8jIPTHH5rW +++ cat /tmp/tmp.eGkuU48pwf +++ rm /tmp/tmp.8jIPTHH5rW /tmp/tmp.eGkuU48pwf +++ return 0 ++ for node_id in "${nodeList[@]}" ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-0 -- pmm-admin --server-url=https://admin:admin@$(get_pmm_service_ip monitoring-service)/ 
--server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ grep /node_id/4d1ae4d0-1ef0-46c8-b715-d8059e034479 +++ awk '{print $4}' ++++ get_pmm_service_ip monitoring-service ++++ local service=monitoring-service ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.spec.type}' ++++ grep -q NotFound ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[]}' ++++ grep -E -q 'hostname|ip' ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.Fk3I9ouuiI +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.yPNJQysBZL ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.Fk3I9ouuiI ++++ cat /tmp/tmp.yPNJQysBZL ++++ rm /tmp/tmp.Fk3I9ouuiI /tmp/tmp.yPNJQysBZL ++++ return 0 ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.hTDyEE1dBb +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.DGRdsJXNrE ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.hTDyEE1dBb ++++ cat /tmp/tmp.DGRdsJXNrE ++++ rm /tmp/tmp.hTDyEE1dBb /tmp/tmp.DGRdsJXNrE ++++ return 0 +++ kubectl_bin exec -n monitoring-2-0-24245 monitoring-0 -- pmm-admin --server-url=https://admin:admin@35.224.235.34/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.CBGLDiveGo ++++ mktemp +++ local LAST_ERR=/tmp/tmp.EFWahuCEDu +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec -n monitoring-2-0-24245 monitoring-0 -- pmm-admin --server-url=https://admin:admin@35.224.235.34/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.CBGLDiveGo +++ cat /tmp/tmp.EFWahuCEDu +++ rm /tmp/tmp.CBGLDiveGo /tmp/tmp.EFWahuCEDu +++ return 0 ++ for node_id in "${nodeList[@]}" ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-0 -- pmm-admin --server-url=https://admin:admin@$(get_pmm_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ grep /node_id/a602e4c2-e78c-4b16-afc1-e94d12ea73ca +++ awk '{print $4}' ++++ get_pmm_service_ip monitoring-service ++++ local service=monitoring-service ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.spec.type}' ++++ grep -q NotFound ++++ grep -E -q 'hostname|ip' ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[]}' ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.rTN5VCrbzs +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.6UrnTPA5wQ ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a 
-n 1 ']' ++++ break ++++ cat /tmp/tmp.rTN5VCrbzs ++++ cat /tmp/tmp.6UrnTPA5wQ ++++ rm /tmp/tmp.rTN5VCrbzs /tmp/tmp.6UrnTPA5wQ ++++ return 0 ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.PrfcXnC643 +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.ueAL9Kly2L ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.PrfcXnC643 ++++ cat /tmp/tmp.ueAL9Kly2L ++++ rm /tmp/tmp.PrfcXnC643 /tmp/tmp.ueAL9Kly2L ++++ return 0 +++ kubectl_bin exec -n monitoring-2-0-24245 monitoring-0 -- pmm-admin --server-url=https://admin:admin@35.224.235.34/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.wCtAjaugO8 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.JxWc5iUR77 +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec -n monitoring-2-0-24245 monitoring-0 -- pmm-admin --server-url=https://admin:admin@35.224.235.34/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.wCtAjaugO8 +++ cat /tmp/tmp.JxWc5iUR77 +++ rm /tmp/tmp.wCtAjaugO8 /tmp/tmp.JxWc5iUR77 +++ return 0 ++ for node_id in "${nodeList[@]}" ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-0 -- pmm-admin --server-url=https://admin:admin@$(get_pmm_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ grep /node_id/3ad31721-00cf-4c8e-af2d-58216e8d3309 +++ awk '{print $4}' ++++ get_pmm_service_ip monitoring-service ++++ local service=monitoring-service ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.spec.type}' ++++ grep -q NotFound ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[]}' ++++ grep -E -q 'hostname|ip' ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.3nAPABQlWM +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.smBziU8ho8 ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.3nAPABQlWM ++++ cat /tmp/tmp.smBziU8ho8 ++++ rm /tmp/tmp.3nAPABQlWM /tmp/tmp.smBziU8ho8 ++++ return 0 ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.ScJaTXbDaf +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.UFBhUiLprS ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.ScJaTXbDaf ++++ cat /tmp/tmp.UFBhUiLprS ++++ rm /tmp/tmp.ScJaTXbDaf /tmp/tmp.UFBhUiLprS ++++ return 0 +++ kubectl_bin exec -n monitoring-2-0-24245 monitoring-0 -- pmm-admin --server-url=https://admin:admin@35.224.235.34/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ 
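
Finally, the mktemp/seq/set +e boilerplate wrapping every kubectl invocation in this trace follows a single pattern. The sketch below reconstructs it with assumptions flagged inline; the retry gate and back-off are only partially visible in the log.

    kubectl_bin() {
        local LAST_OUT LAST_ERR exit_status=0 timeout=4   # timeout is declared
        LAST_OUT=$(mktemp)                                # in the trace; its
        LAST_ERR=$(mktemp)                                # use is not visible
        for i in $(seq 0 2); do    # up to three attempts
            set +e
            kubectl "$@" 1>"${LAST_OUT}" 2>"${LAST_ERR}"
            exit_status=$?
            set -e
            # gate in the trace: '[ $exit_status != 0 -a -n 1 ]'; the second
            # operand is always true, so effectively: retry only on failure
            if [ "${exit_status}" != 0 ]; then
                cat "${LAST_OUT}"
                cat "${LAST_ERR}"
                sleep "${i}"    # observed as 'sleep 0'; back-off formula assumed
            else
                break
            fi
        done
        cat "${LAST_OUT}"
        cat "${LAST_ERR}"
        rm "${LAST_OUT}" "${LAST_ERR}"
        return "${exit_status}"
    }
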
mktemp +++ local LAST_OUT=/tmp/tmp.jcIMQBeQGo ++++ mktemp +++ local LAST_ERR=/tmp/tmp.USIwfamB8l +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec -n monitoring-2-0-24245 monitoring-0 -- pmm-admin --server-url=https://admin:admin@35.224.235.34/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.jcIMQBeQGo +++ cat /tmp/tmp.USIwfamB8l +++ rm /tmp/tmp.jcIMQBeQGo /tmp/tmp.USIwfamB8l +++ return 0 ++ for node_id in "${nodeList[@]}" ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-0 -- pmm-admin --server-url=https://admin:admin@$(get_pmm_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ grep /node_id/2a1ccd92-2701-4824-9ec7-0388be9fe086 +++ awk '{print $4}' ++++ get_pmm_service_ip monitoring-service ++++ local service=monitoring-service ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.spec.type}' ++++ grep -q NotFound ++++ grep -E -q 'hostname|ip' ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[]}' ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.Ut9r9xcFfz +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.BKnWw6pfQT ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.Ut9r9xcFfz ++++ cat /tmp/tmp.BKnWw6pfQT ++++ rm /tmp/tmp.Ut9r9xcFfz /tmp/tmp.BKnWw6pfQT ++++ return 0 ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.gry4I1ZgA4 +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.oAmyZ8PMvG ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.gry4I1ZgA4 ++++ cat /tmp/tmp.oAmyZ8PMvG ++++ rm /tmp/tmp.gry4I1ZgA4 /tmp/tmp.oAmyZ8PMvG ++++ return 0 +++ kubectl_bin exec -n monitoring-2-0-24245 monitoring-0 -- pmm-admin --server-url=https://admin:admin@35.224.235.34/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.W5sBwiIKO5 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.Vu2OzFwGcF +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec -n monitoring-2-0-24245 monitoring-0 -- pmm-admin --server-url=https://admin:admin@35.224.235.34/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.W5sBwiIKO5 +++ cat /tmp/tmp.Vu2OzFwGcF +++ rm /tmp/tmp.W5sBwiIKO5 /tmp/tmp.Vu2OzFwGcF +++ return 0 ++ echo /node_id/8732ed7f-ecdc-41d2-8d14-a2ea205ee730 /node_id/6e04810f-ff04-4d3e-afe5-efd3ed0c2983 /node_id/b39230bf-013b-4469-8aee-b2d4bbf850cf /node_id/8d364e49-7b43-4422-a327-6699e091ef8d /node_id/98fa8c2f-d864-43fb-a7ef-eabd0e4b2a22 /node_id/4d1ae4d0-1ef0-46c8-b715-d8059e034479 /node_id/a602e4c2-e78c-4b16-afc1-e94d12ea73ca /node_id/3ad31721-00cf-4c8e-af2d-58216e8d3309 
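What the trace above amounts to: for every node ID recorded when the cluster was created, the test asks the PMM server (reached through the monitoring-service LoadBalancer IP) whether a CONTAINER_NODE with that ID is still registered. A minimal sketch of the pattern, assuming the suite's get_pmm_service_ip helper and plain kubectl in place of the retry wrapper:

# Collect the PMM-registered node IDs matching a list of expected ones.
collect_pmm_node_ids() {
    local namespace=$1; shift
    local pmm_ip found=()
    pmm_ip=$(get_pmm_service_ip monitoring-service)  # LoadBalancer IP, e.g. 35.224.235.34
    for node_id in "$@"; do
        # one inventory listing per expected node; column 4 holds the node ID
        found+=($(kubectl exec -n "$namespace" monitoring-0 -- \
            pmm-admin --server-url="https://admin:admin@${pmm_ip}/" --server-insecure-tls \
            inventory list nodes --node-type=CONTAINER_NODE |
            grep "$node_id" | awk '{print $4}'))
    done
    echo "${found[@]}"
}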
+ for node_id in "${nodeList_from_pmm[@]}"
+ '[' -z /node_id/8732ed7f-ecdc-41d2-8d14-a2ea205ee730 ']'
(the same non-empty check passes for the remaining eight node IDs)
+ kubectl_bin patch psmdb monitoring --type json '-p=[{"op":"add","path":"/spec/pause","value":true}]'
perconaservermongodb.psmdb.percona.com/monitoring patched
+ wait_for_delete pod/monitoring-mongos-0
waiting for pod/monitoring-mongos-0 to be deleted.........................Error from server (NotFound): pods "monitoring-mongos-0" not found
+ wait_for_delete pod/monitoring-rs0-0
waiting for pod/monitoring-rs0-0 to be deleted............Error from server (NotFound): pods "monitoring-rs0-0" not found
+ wait_for_delete pod/monitoring-cfg-0
waiting for pod/monitoring-cfg-0 to be deleted...........Error from server (NotFound): pods "monitoring-cfg-0" not found
(each wait printed the NotFound message four times while polling; the repeats are trimmed)
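Pausing and draining is a two-step pattern: flip spec.pause on the custom resource, then block until the data pods disappear. A compact equivalent, using kubectl's built-in wait in place of the suite's dot-printing wait_for_delete helper:

# Pause the cluster, then block until its pods are gone.
kubectl patch psmdb monitoring --type json \
    -p='[{"op":"add","path":"/spec/pause","value":true}]'
for pod in monitoring-mongos-0 monitoring-rs0-0 monitoring-cfg-0; do
    # 'wait --for=delete' errors with NotFound once the pod is already gone,
    # which is exactly the end state we want, so tolerate that exit code
    kubectl wait --for=delete "pod/$pod" --timeout=300s || true
done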
+ desc 'check if services are not deleted'
-----------------------------------------------------------------------------------
check if services are not deleted
-----------------------------------------------------------------------------------
+ kubectl_bin get svc monitoring-rs0
NAME             TYPE        CLUSTER-IP   EXTERNAL-IP   PORT(S)     AGE
monitoring-rs0   ClusterIP   None         <none>        27019/TCP   15m
+ kubectl_bin get svc monitoring-cfg
NAME             TYPE        CLUSTER-IP   EXTERNAL-IP   PORT(S)     AGE
monitoring-cfg   ClusterIP   None         <none>        27019/TCP   15m
+ kubectl_bin get svc monitoring-mongos
NAME                TYPE        CLUSTER-IP       EXTERNAL-IP   PORT(S)     AGE
monitoring-mongos   ClusterIP   34.118.228.156   <none>        27019/TCP   15m
+ does_node_id_exists_in_pmm=($(does_node_id_exists "${nodeList[@]}"))
++ does_node_id_exists /node_id/8732ed7f-ecdc-41d2-8d14-a2ea205ee730 /node_id/6e04810f-ff04-4d3e-afe5-efd3ed0c2983 /node_id/b39230bf-013b-4469-8aee-b2d4bbf850cf /node_id/8d364e49-7b43-4422-a327-6699e091ef8d /node_id/98fa8c2f-d864-43fb-a7ef-eabd0e4b2a22 /node_id/4d1ae4d0-1ef0-46c8-b715-d8059e034479 /node_id/a602e4c2-e78c-4b16-afc1-e94d12ea73ca /node_id/3ad31721-00cf-4c8e-af2d-58216e8d3309 /node_id/2a1ccd92-2701-4824-9ec7-0388be9fe086
++ local -a nodeList
++ local -a nodeList_from_pmm
++ for node_id in "${nodeList[@]}"
++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-0 -- pmm-admin --server-url=https://admin:admin@$(get_pmm_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}'))
(the endpoint lookup and inventory listing from the first pass repeat here for all nine node IDs; with the cluster paused none of the greps matched, so nodeList_from_pmm stayed empty)
++ echo
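The point of this second pass: pausing the cluster is expected to unregister its nodes from PMM, so the same lookup that produced nine IDs before must now produce none. The assertion reduces to the following (names as in the trace; does_node_id_exists is the suite's helper shown above):

# After the pause, no previously-registered node ID may remain in PMM.
remaining=$(does_node_id_exists "${nodeList[@]}")
if [[ -n "$remaining" ]]; then
    echo "nodes still registered in PMM after pause: $remaining" >&2
    exit 1
fi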
+ desc 'check customClusterName for pmm'
-----------------------------------------------------------------------------------
check customClusterName for pmm
-----------------------------------------------------------------------------------
+ custom_name=custom-cluster-name
+ kubectl_bin patch psmdb monitoring --type json '-p=[{"op":"add","path":"/spec/pause","value":false}, {"op":"add","path":"/spec/pmm/customClusterName","value":custom-cluster-name}]'
perconaservermongodb.psmdb.percona.com/monitoring patched
+ wait_for_running monitoring-rs0 3
waiting for pod/monitoring-rs0-0 to be ready.............OK
waiting for pod/monitoring-rs0-1 to be ready.............OK
++ kubectl_bin get psmdb monitoring -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}'
+ [[ '' == \t\r\u\e ]]
waiting for pod/monitoring-rs0-2 to be ready...........OK
++ kubectl_bin get psmdb monitoring -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}'
+ [[ '' == \t\r\u\e ]]
++ kubectl_bin get psmdb monitoring -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}'
+ [[ '' == \t\r\u\e ]]
+ sleep 10
+ set +x
Waiting for cluster readyness...............................................................................................................
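wait_for_running polls each replica set member, skips the optional member types (arbiter, nonvoting and hidden are all unset here, hence the empty jsonpath results), and then spins until the cluster reports ready. A stand-in using kubectl's built-in waits -- a sketch, not the suite's helper, and it assumes the PSMDB resource exposes .status.state the way the operator populates it:

# Wait for the three rs0 members, then for the CR itself to report ready.
for i in 0 1 2; do
    kubectl wait --for=condition=Ready "pod/monitoring-rs0-$i" --timeout=600s
done
kubectl wait --for=jsonpath='{.status.state}'=ready psmdb/monitoring --timeout=600s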
++ get_service_endpoint monitoring-service
+++ kubectl_bin get service/monitoring-service -o json
+++ jq '.status.loadBalancer.ingress[].hostname'
++ local hostname=null
+++ kubectl_bin get service/monitoring-service -o json
+++ jq '.status.loadBalancer.ingress[].ip'
++ local ip=35.224.235.34
++ echo 35.224.235.34
+ curl -s -k -d '{"service_type":"MONGODB_SERVICE"}' https://admin:admin@35.224.235.34/v1/inventory/Services/List
+ check_custom_cluster_name monitoring-2-0-24245-monitoring-mongos-0 /tmp/tmp.DK7M6zKIcZ/pmm_service_list.json
Checking monitoring-2-0-24245-monitoring-mongos-0
++ jq -r '.mongodb[] | select(.service_name=="monitoring-2-0-24245-monitoring-mongos-0") | .cluster' /tmp/tmp.DK7M6zKIcZ/pmm_service_list.json
+ pmm_service_cluster=custom-cluster-name
+ [[ custom-cluster-name != custom-cluster-name ]]
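The cluster-name checks here and just below are a single jq filter over the inventory dump fetched above: every MongoDB service registered in PMM must carry the customClusterName that was patched into the spec. Standalone form of the same assertion (endpoint and credentials as in this run):

# Verify each PMM-registered MongoDB service reports the custom cluster name.
curl -s -k -d '{"service_type":"MONGODB_SERVICE"}' \
    https://admin:admin@35.224.235.34/v1/inventory/Services/List \
    > /tmp/pmm_service_list.json
for svc in monitoring-2-0-24245-monitoring-{mongos,rs0,cfg}-0; do
    cluster=$(jq -r --arg s "$svc" \
        '.mongodb[] | select(.service_name==$s) | .cluster' /tmp/pmm_service_list.json)
    [[ "$cluster" == custom-cluster-name ]] || { echo "wrong cluster for $svc: $cluster" >&2; exit 1; }
done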
select(.service_name=="monitoring-2-0-24245-monitoring-rs0-0") | .cluster' /tmp/tmp.DK7M6zKIcZ/pmm_service_list.json + pmm_service_cluster=custom-cluster-name + [[ custom-cluster-name != custom-cluster-name ]] + check_custom_cluster_name monitoring-2-0-24245-monitoring-cfg-0 /tmp/tmp.DK7M6zKIcZ/pmm_service_list.json + local pod_service_name=monitoring-2-0-24245-monitoring-cfg-0 + local pmm_services_file=/tmp/tmp.DK7M6zKIcZ/pmm_service_list.json + echo 'Checking monitoring-2-0-24245-monitoring-cfg-0' Checking monitoring-2-0-24245-monitoring-cfg-0 ++ jq -r '.mongodb[] | select(.service_name=="monitoring-2-0-24245-monitoring-cfg-0") | .cluster' /tmp/tmp.DK7M6zKIcZ/pmm_service_list.json + pmm_service_cluster=custom-cluster-name + [[ custom-cluster-name != custom-cluster-name ]] + [[ -n '' ]] ++ kubectl_bin logs monitoring-rs0-0 pmm-client ++ grep -c 'cannot auto discover databases and collections' +++ mktemp ++ local LAST_OUT=/tmp/tmp.2yOY0fETKm +++ mktemp ++ local LAST_ERR=/tmp/tmp.fZYxDYyxIp ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl logs monitoring-rs0-0 pmm-client ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.2yOY0fETKm ++ cat /tmp/tmp.fZYxDYyxIp ++ rm /tmp/tmp.2yOY0fETKm /tmp/tmp.fZYxDYyxIp ++ return 0 + [[ 0 != 0 ]] + helm uninstall monitoring release "monitoring" uninstalled + destroy monitoring-2-0-24245 + local namespace=monitoring-2-0-24245 + local ignore_logs=true + [[ 0 == 1 ]] + desc 'destroy cluster/operator and all other resources' + set +o xtrace ----------------------------------------------------------------------------------- destroy cluster/operator and all other resources ----------------------------------------------------------------------------------- + '[' true == false ']' + delete_backups + desc 'Delete psmdb-backup' + set +o xtrace ----------------------------------------------------------------------------------- Delete psmdb-backup ----------------------------------------------------------------------------------- ++ kubectl_bin get psmdb-backup --no-headers ++ wc -l +++ mktemp ++ local LAST_OUT=/tmp/tmp.53RVsM1CBJ +++ mktemp ++ local LAST_ERR=/tmp/tmp.eSiiFWAL0H ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb-backup --no-headers ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.53RVsM1CBJ ++ cat /tmp/tmp.eSiiFWAL0H No resources found in monitoring-2-0-24245 namespace. 
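That last zero-count check scans the pmm-client container log and fails the test if the auto-discovery warning ever appeared. A standalone equivalent; note that grep -c prints 0 but exits non-zero when nothing matches, so the count is captured with the failure tolerated:

# Fail if pmm-client ever logged the auto-discovery warning.
count=$(kubectl logs monitoring-rs0-0 -c pmm-client |
    grep -c 'cannot auto discover databases and collections' || true)
if [[ "$count" != 0 ]]; then
    echo "pmm-client reported auto-discovery failures ($count occurrences)" >&2
    exit 1
fi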
+ helm uninstall monitoring
release "monitoring" uninstalled
+ destroy monitoring-2-0-24245
+ desc 'destroy cluster/operator and all other resources'
-----------------------------------------------------------------------------------
destroy cluster/operator and all other resources
-----------------------------------------------------------------------------------
+ delete_backups
+ desc 'Delete psmdb-backup'
-----------------------------------------------------------------------------------
Delete psmdb-backup
-----------------------------------------------------------------------------------
++ kubectl_bin get psmdb-backup --no-headers
++ wc -l
No resources found in monitoring-2-0-24245 namespace.
+ '[' 0 '!=' 0 ']'
+ delete_crd
+ desc 'get and delete old CRDs and RBAC'
-----------------------------------------------------------------------------------
get and delete old CRDs and RBAC
-----------------------------------------------------------------------------------
+ kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2283/deploy/crd.yaml --ignore-not-found --wait=false
customresourcedefinition.apiextensions.k8s.io "perconaservermongodbbackups.psmdb.percona.com" deleted
customresourcedefinition.apiextensions.k8s.io "perconaservermongodbrestores.psmdb.percona.com" deleted
customresourcedefinition.apiextensions.k8s.io "perconaservermongodbs.psmdb.percona.com" deleted
++ yq eval .metadata.name /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2283/deploy/crd.yaml
++ grep -v '\-\-\-'
grep: warning: stray \ before -
grep: warning: stray \ before -
+ for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-')
+ kubectl get perconaservermongodbbackups.psmdb.percona.com --all-namespaces -o wide
+ grep -v NAMESPACE
+ xargs -L 1 sh -xc 'kubectl patch perconaservermongodbbackups.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"'
error: the server doesn't have a resource type "perconaservermongodbbackups"
+ :
+ kubectl_bin wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com
(the same finalizer-clearing and wait --for=delete sequence runs for perconaservermongodbrestores.psmdb.percona.com and perconaservermongodbs.psmdb.percona.com, each hitting the same "the server doesn't have a resource type" error now that the CRDs are already gone)
+ local rbac_yaml=rbac.yaml
+ '[' -n psmdb-operator ']'
+ rbac_yaml=cw-rbac.yaml
+ kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2283/deploy/cw-rbac.yaml --ignore-not-found
clusterrole.rbac.authorization.k8s.io "percona-server-mongodb-operator" deleted
clusterrolebinding.rbac.authorization.k8s.io "service-account-percona-server-mongodb-operator" deleted
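The finalizer-clearing loop above keeps a stuck custom resource from blocking CRD removal: each surviving object has metadata.finalizers emptied before the script waits for the CRD to go away. A sketch of the pattern with a read loop in place of the suite's xargs one-liner, tolerating the already-gone resource type seen in this run:

# Strip finalizers from any remaining objects of a CRD, then wait it out.
clear_crd() {
    local crd=$1
    kubectl get "$crd" --all-namespaces --no-headers \
        -o custom-columns=NS:.metadata.namespace,NAME:.metadata.name 2>/dev/null |
    while read -r ns name; do
        kubectl patch "$crd" -n "$ns" "$name" --type=merge \
            -p '{"metadata":{"finalizers":[]}}'
    done
    kubectl wait --for=delete "crd/$crd" --timeout=120s || true
}
clear_crd perconaservermongodbs.psmdb.percona.com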
+ destroy_cert_manager
+ kubectl_bin delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml
+ exit_status=1
+ cat /tmp/tmp.WRtfompnyr
namespace "cert-manager" deleted
customresourcedefinition.apiextensions.k8s.io "challenges.acme.cert-manager.io" deleted
customresourcedefinition.apiextensions.k8s.io "orders.acme.cert-manager.io" deleted
customresourcedefinition.apiextensions.k8s.io "certificaterequests.cert-manager.io" deleted
customresourcedefinition.apiextensions.k8s.io "certificates.cert-manager.io" deleted
customresourcedefinition.apiextensions.k8s.io "clusterissuers.cert-manager.io" deleted
customresourcedefinition.apiextensions.k8s.io "issuers.cert-manager.io" deleted
(the first attempt also deleted the cert-manager serviceaccounts, clusterroles, clusterrolebindings, roles, rolebindings and both webhook configurations; the full list is trimmed here)
+ cat /tmp/tmp.eMJMUDbCyD
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found
(seven more NotFound errors follow for rolebindings, services and deployments that were already absent)
+ sleep 0
+ kubectl delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml
+ exit_status=1
+ cat /tmp/tmp.WRtfompnyr
namespace "cert-manager" deleted
+ cat /tmp/tmp.eMJMUDbCyD
(on the second attempt everything except the namespace was already gone: one NotFound error per remaining manifest object, list trimmed)
+ sleep 4
+ kubectl delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml
+ exit_status=1
+ cat /tmp/tmp.WRtfompnyr
+ cat /tmp/tmp.eMJMUDbCyD
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": namespaces "cert-manager" not found
(the third attempt returns NotFound for every object in the manifest; the log breaks off mid-list)
Error from server (NotFound): error when deleting
"https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cluster-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-edit" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": 
clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": services "cert-manager-cainjector" not found Error from server (NotFound): error when deleting 
"https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": services "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": services "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": deployments.apps "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": deployments.apps "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": deployments.apps "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": mutatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": validatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found + sleep 8 + cat /tmp/tmp.WRtfompnyr + cat /tmp/tmp.eMJMUDbCyD Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": namespaces "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "challenges.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "orders.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificaterequests.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificates.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "clusterissuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "issuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": serviceaccounts "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": serviceaccounts "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": serviceaccounts "cert-manager-webhook" not found Error 
+ rm /tmp/tmp.WRtfompnyr /tmp/tmp.eMJMUDbCyD
+ return 1
+ true
+ '[' -n '' ']'
+ '[' -n psmdb-operator ']'
+ kubectl_bin delete --grace-period=0 --force=true namespace monitoring-2-0-24245
+ rm -rf /tmp/tmp.DK7M6zKIcZ
+ kubectl_bin delete --grace-period=0 --force=true namespace psmdb-operator
++ mktemp
++ mktemp
+ desc 'test passed'
+ set +o xtrace
-----------------------------------------------------------------------------------
test passed
-----------------------------------------------------------------------------------
+ local LAST_OUT=/tmp/tmp.kHJDlfon5I
+ local LAST_OUT=/tmp/tmp.imt7azr0vf
++ mktemp
++ mktemp
+ local LAST_ERR=/tmp/tmp.UyQD0sJGdM
+ local exit_status=0
+ local timeout=4
+ local LAST_ERR=/tmp/tmp.AtHHqjsYqO
+ local exit_status=0
+ local timeout=4
++ seq 0 2
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ kubectl delete --grace-period=0 --force=true namespace monitoring-2-0-24245
+ for i in $(seq 0 2)
+ set +e
+ kubectl delete --grace-period=0 --force=true namespace psmdb-operator
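
The escalating delays in the retries above (sleep 0, sleep 4, sleep 8, with timeout=4) come from the suite's kubectl_bin helper, which wraps every kubectl call in a three-attempt retry loop with captured output. A minimal sketch of that pattern, reconstructed from the trace rather than taken from the suite's actual source:

# Sketch of the retry wrapper implied by the trace (a reconstruction, not the
# suite's real kubectl_bin): run kubectl up to three times, capture stdout and
# stderr to temp files, back off by timeout*i seconds between failed attempts.
kubectl_bin() {
    local LAST_OUT LAST_ERR exit_status=0 timeout=4
    LAST_OUT=$(mktemp)
    LAST_ERR=$(mktemp)
    for i in $(seq 0 2); do
        set +e
        kubectl "$@" >"$LAST_OUT" 2>"$LAST_ERR"
        exit_status=$?
        set -e
        if [ "$exit_status" != 0 ]; then
            cat "$LAST_OUT"
            cat "$LAST_ERR" >&2
            sleep $((timeout * i))   # yields the observed sleep 0 / sleep 4 / sleep 8
        else
            break
        fi
    done
    # Replay the last attempt's output, clean up, and propagate its exit status.
    cat "$LAST_OUT"
    cat "$LAST_ERR" >&2
    rm "$LAST_OUT" "$LAST_ERR"
    return "$exit_status"
}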
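
All three attempts fail with the same NotFound flood and the helper finally returns 1 (tolerated by the caller, hence the "+ true" in the trace) because this delete, unlike the crd.yaml and cw-rbac.yaml deletes earlier in the run, is issued without --ignore-not-found. A hypothetical variant that treats already-absent objects as success, so no retries would be burned:

# --ignore-not-found makes kubectl delete exit 0 when none of the manifest's
# objects exist, matching how this suite deletes its own CRD/RBAC manifests.
kubectl delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml \
    --ignore-not-found --wait=false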
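
The "test passed" banner is printed by the suite's desc helper, which switches xtrace off before writing (hence the "+ set +o xtrace" immediately before the banner). A plausible sketch, assuming this shape; re-enabling tracing at the end is an assumption the trace does not show directly:

desc() {
    set +o xtrace   # suppress command echo while printing the banner
    echo "-----------------------------------------------------------------------------------"
    echo "$@"
    echo "-----------------------------------------------------------------------------------"
    set -o xtrace   # assumed: tracing resumes after the banner in the log
}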