Log: /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2129/e2e-tests/logs/monitoring-2-0.log grep: warning: stray \ before - Warning: version difference between client (1.34) and server (1.31) exceeds the supported minor version skew of +/-1 Warning: version difference between client (1.34) and server (1.31) exceeds the supported minor version skew of +/-1 Warning: version difference between client (1.34) and server (1.31) exceeds the supported minor version skew of +/-1 + create_infra monitoring-2-0-3412 + local ns=monitoring-2-0-3412 + [[ 1 == 1 ]] + delete_crd + desc 'get and delete old CRDs and RBAC' + set +o xtrace ----------------------------------------------------------------------------------- get and delete old CRDs and RBAC ----------------------------------------------------------------------------------- + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2129/deploy/crd.yaml --ignore-not-found --wait=false ++ mktemp + local LAST_OUT=/tmp/tmp.yLFXjL7i76 ++ mktemp + local LAST_ERR=/tmp/tmp.tFRr0CHBDz + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2129/deploy/crd.yaml --ignore-not-found --wait=false + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.yLFXjL7i76 customresourcedefinition.apiextensions.k8s.io "perconaservermongodbbackups.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbrestores.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbs.psmdb.percona.com" deleted + cat /tmp/tmp.tFRr0CHBDz + rm /tmp/tmp.yLFXjL7i76 /tmp/tmp.tFRr0CHBDz + return 0 ++ yq eval .metadata.name /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2129/deploy/crd.yaml ++ grep -v '\-\-\-' grep: warning: stray \ before - grep: warning: stray \ before - + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + kubectl get perconaservermongodbbackups.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbbackups.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbbackups" + kubectl patch perconaservermongodbbackups.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbbackups" + : + kubectl_bin wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.WmtTh2vddN ++ mktemp + local LAST_ERR=/tmp/tmp.29FdgDTIif + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.WmtTh2vddN + cat /tmp/tmp.29FdgDTIif + rm /tmp/tmp.WmtTh2vddN /tmp/tmp.29FdgDTIif + return 0 + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + kubectl get perconaservermongodbrestores.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbrestores.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbrestores" + kubectl patch perconaservermongodbrestores.psmdb.percona.com -n sh --type=merge -p 
'{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbrestores" + : + kubectl_bin wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.EEIGQYbOl0 ++ mktemp + local LAST_ERR=/tmp/tmp.Zc0Pm0XOxF + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.EEIGQYbOl0 + cat /tmp/tmp.Zc0Pm0XOxF + rm /tmp/tmp.EEIGQYbOl0 /tmp/tmp.Zc0Pm0XOxF + return 0 + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + kubectl get perconaservermongodbs.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbs.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbs" + kubectl patch perconaservermongodbs.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbs" + : + kubectl_bin wait --for=delete crd perconaservermongodbs.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.iTll9eE8OM ++ mktemp + local LAST_ERR=/tmp/tmp.a49JPPwqXN + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbs.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.iTll9eE8OM + cat /tmp/tmp.a49JPPwqXN + rm /tmp/tmp.iTll9eE8OM /tmp/tmp.a49JPPwqXN + return 0 + local rbac_yaml=rbac.yaml + '[' -n psmdb-operator ']' + rbac_yaml=cw-rbac.yaml + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2129/deploy/cw-rbac.yaml --ignore-not-found ++ mktemp + local LAST_OUT=/tmp/tmp.ADXVN0TQgR ++ mktemp + local LAST_ERR=/tmp/tmp.l0PMWImnB6 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2129/deploy/cw-rbac.yaml --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.ADXVN0TQgR clusterrole.rbac.authorization.k8s.io "percona-server-mongodb-operator" deleted clusterrolebinding.rbac.authorization.k8s.io "service-account-percona-server-mongodb-operator" deleted + cat /tmp/tmp.l0PMWImnB6 + rm /tmp/tmp.ADXVN0TQgR /tmp/tmp.l0PMWImnB6 + return 0 + check_crd_for_deletion PR-2129-32754096 + local git_tag=PR-2129-32754096 ++ curl -s https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/PR-2129-32754096/deploy/crd.yaml ++ /usr/sbin/sed s/---//g ++ yq eval .metadata.name ++ /usr/sbin/sed ':a;N;$!ba;s/\n/ /g' + for crd_name in $(curl -s https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/${git_tag}/deploy/crd.yaml | yq eval '.metadata.name' | $sed 's/---//g' | $sed ':a;N;$!ba;s/\n/ /g') ++ kubectl_bin get crd/null -o 'jsonpath={.status.conditions[-1].type}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.tnziYcw41e +++ mktemp ++ local LAST_ERR=/tmp/tmp.MWeRx8Bo7g ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}' ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.tnziYcw41e ++ cat /tmp/tmp.MWeRx8Bo7g Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ 
sleep 0 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}' ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.tnziYcw41e ++ cat /tmp/tmp.MWeRx8Bo7g Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ sleep 4 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}' ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.tnziYcw41e ++ cat /tmp/tmp.MWeRx8Bo7g Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ sleep 8 ++ cat /tmp/tmp.tnziYcw41e ++ cat /tmp/tmp.MWeRx8Bo7g Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ rm /tmp/tmp.tnziYcw41e /tmp/tmp.MWeRx8Bo7g ++ return 1 + [[ '' == \T\e\r\m\i\n\a\t\i\n\g ]] + '[' -n psmdb-operator ']' + create_namespace psmdb-operator + local namespace=psmdb-operator + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + destroy_chaos_mesh ++ helm list --all-namespaces --filter chaos-mesh ++ tail -n1 ++ awk '-F ' '{print $2}' ++ sed s/NAMESPACE// + local chaos_mesh_ns= + desc 'destroy chaos-mesh' + set +o xtrace ----------------------------------------------------------------------------------- destroy chaos-mesh ----------------------------------------------------------------------------------- + '[' -n '' ']' ++ awk '{print $1}' ++ kubectl get MutatingWebhookConfiguration ++ grep chaos-mesh + timeout 30 kubectl delete MutatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep validate-auth ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl api-resources ++ grep chaos-mesh ++ awk '{print $1}' ++ kubectl get crd ++ grep chaos-mesh.org ++ awk '{print $1}' + timeout 30 kubectl delete crd error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrolebinding ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrolebinding error: resource(s) were provided, but no name was specified + : ++ awk '{print $1}' ++ kubectl get clusterrole ++ grep chaos-mesh + timeout 30 kubectl delete clusterrole error: resource(s) were provided, but no name was specified + : + desc 'cleaned up all old namespaces' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up all old namespaces ----------------------------------------------------------------------------------- + kubectl_bin get ns + egrep -v '^kube-|^default|Terminating|psmdb-operator|openshift|^gke-|^gmp-|^NAME' + awk '{print$1}' + '[' -n '' ']' + desc 'cleaned up old namespaces psmdb-operator' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up old namespaces psmdb-operator ----------------------------------------------------------------------------------- + kubectl_bin delete namespace psmdb-operator --ignore-not-found ++ mktemp + xargs kubectl delete ns egrep: warning: egrep is obsolescent; using grep -E ++ mktemp + local LAST_OUT=/tmp/tmp.WI4NgmkDPa ++ mktemp + local LAST_OUT=/tmp/tmp.pYyMAvL308 ++ 
mktemp + local LAST_ERR=/tmp/tmp.TH29SFl73W + local exit_status=0 + local timeout=4 ++ seq 0 2 + local LAST_ERR=/tmp/tmp.W2ULZZ3Nf0 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get ns + for i in $(seq 0 2) + set +e + kubectl delete namespace psmdb-operator --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.WI4NgmkDPa + cat /tmp/tmp.TH29SFl73W + rm /tmp/tmp.WI4NgmkDPa /tmp/tmp.TH29SFl73W + return 0 namespace "cert-manager" deleted namespace "monitoring-2-0-29159" deleted + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.pYyMAvL308 namespace "psmdb-operator" deleted + cat /tmp/tmp.W2ULZZ3Nf0 + rm /tmp/tmp.pYyMAvL308 /tmp/tmp.W2ULZZ3Nf0 + return 0 + kubectl_bin wait --for=delete namespace psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.rF2ghLc46p ++ mktemp + local LAST_ERR=/tmp/tmp.jgYXWTsgOl + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete namespace psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.rF2ghLc46p + cat /tmp/tmp.jgYXWTsgOl + rm /tmp/tmp.rF2ghLc46p /tmp/tmp.jgYXWTsgOl + return 0 + desc 'create namespace psmdb-operator' + set +o xtrace ----------------------------------------------------------------------------------- create namespace psmdb-operator ----------------------------------------------------------------------------------- + kubectl_bin create namespace psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.2CsxbVJyPO ++ mktemp + local LAST_ERR=/tmp/tmp.19uYl5pudz + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl create namespace psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.2CsxbVJyPO namespace/psmdb-operator created + cat /tmp/tmp.19uYl5pudz + rm /tmp/tmp.2CsxbVJyPO /tmp/tmp.19uYl5pudz + return 0 + set_kube_ctx psmdb-operator + local namespace=psmdb-operator ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.2zF1kIeKe3 +++ mktemp ++ local LAST_ERR=/tmp/tmp.MM19r6Tc3T ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.2zF1kIeKe3 ++ cat /tmp/tmp.MM19r6Tc3T ++ rm /tmp/tmp.2zF1kIeKe3 /tmp/tmp.MM19r6Tc3T ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-2129-32754096-2-cluster4 --namespace=psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.ztzVgk240g ++ mktemp + local LAST_ERR=/tmp/tmp.2brgPMFhMq + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-2129-32754096-2-cluster4 --namespace=psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.ztzVgk240g Context "gke_cloud-dev-112233_us-central1-a_jen-psmdb-2129-32754096-2-cluster4" modified. 
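Note on the trace above: the repeated LAST_OUT/LAST_ERR/exit_status lines come from the test suite's kubectl_bin wrapper, which retries each kubectl call up to three times with a growing back-off (the trace shows sleep 0, sleep 4, sleep 8). A minimal sketch of that retry logic, reconstructed from this trace only (the real function in e2e-tests/functions may differ in details), looks like:

    # hedged sketch of the kubectl_bin retry wrapper seen throughout this log
    kubectl_bin() {
        local LAST_OUT LAST_ERR exit_status=0
        LAST_OUT=$(mktemp)
        LAST_ERR=$(mktemp)
        # up to three attempts, sleeping 0s, 4s, 8s between failures
        for i in $(seq 0 2); do
            set +e
            kubectl "$@" >"$LAST_OUT" 2>"$LAST_ERR"
            exit_status=$?
            set -e
            [ "$exit_status" -eq 0 ] && break
            sleep $((i * 4))
        done
        cat "$LAST_OUT"
        cat "$LAST_ERR" >&2
        rm "$LAST_OUT" "$LAST_ERR"
        return "$exit_status"
    }

This explains why every failed kubectl call in the log appears three times before the wrapper finally returns 1.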
+ cat /tmp/tmp.2brgPMFhMq + rm /tmp/tmp.ztzVgk240g /tmp/tmp.2brgPMFhMq + return 0 + deploy_operator + desc 'start PSMDB operator: perconalab/percona-server-mongodb-operator:PR-2129-32754096' + set +o xtrace ----------------------------------------------------------------------------------- start PSMDB operator: perconalab/percona-server-mongodb-operator:PR-2129-32754096 ----------------------------------------------------------------------------------- + local cr_file + '[' -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2129/e2e-tests/monitoring-2-0/conf/crd.yaml ']' + cr_file=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2129/deploy/crd.yaml + kubectl_bin apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2129/deploy/crd.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.46xJLaXNjO ++ mktemp + local LAST_ERR=/tmp/tmp.3Tw6chXuMX + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2129/deploy/crd.yaml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.46xJLaXNjO customresourcedefinition.apiextensions.k8s.io/perconaservermongodbbackups.psmdb.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaservermongodbrestores.psmdb.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaservermongodbs.psmdb.percona.com serverside-applied + cat /tmp/tmp.3Tw6chXuMX + rm /tmp/tmp.46xJLaXNjO /tmp/tmp.3Tw6chXuMX + return 0 + '[' -n psmdb-operator ']' + apply_rbac cw-rbac + local operator_namespace=psmdb-operator + local rbac=cw-rbac + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2129/deploy/cw-rbac.yaml + sed -e 's^namespace: .*^namespace: psmdb-operator^' + kubectl_bin apply -n psmdb-operator -f - ++ mktemp + local LAST_OUT=/tmp/tmp.32A88Whbqe ++ mktemp + local LAST_ERR=/tmp/tmp.3RntGS0uUr + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -n psmdb-operator -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.32A88Whbqe clusterrole.rbac.authorization.k8s.io/percona-server-mongodb-operator created serviceaccount/percona-server-mongodb-operator created clusterrolebinding.rbac.authorization.k8s.io/service-account-percona-server-mongodb-operator created + cat /tmp/tmp.3RntGS0uUr + rm /tmp/tmp.32A88Whbqe /tmp/tmp.3RntGS0uUr + return 0 + yq eval ' (.spec.template.spec.containers[].image = "perconalab/percona-server-mongodb-operator:PR-2129-32754096") | ((.. | select(.[] == "DISABLE_TELEMETRY")) |= .value="true") | ((.. 
| select(.[] == "LOG_LEVEL")) |= .value="DEBUG")' /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2129/deploy/cw-operator.yaml + kubectl_bin apply -n psmdb-operator -f - ++ mktemp + local LAST_OUT=/tmp/tmp.XLeh49e9Tq ++ mktemp + local LAST_ERR=/tmp/tmp.WWqbEW7mqu + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -n psmdb-operator -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.XLeh49e9Tq deployment.apps/percona-server-mongodb-operator created + cat /tmp/tmp.WWqbEW7mqu + rm /tmp/tmp.XLeh49e9Tq /tmp/tmp.WWqbEW7mqu + return 0 + sleep 20 ++ get_operator_pod ++ kubectl_bin get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.y27Xz3YQU1 +++ mktemp ++ local LAST_ERR=/tmp/tmp.3eCEVEw3RA ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.y27Xz3YQU1 ++ cat /tmp/tmp.3eCEVEw3RA ++ rm /tmp/tmp.y27Xz3YQU1 /tmp/tmp.3eCEVEw3RA ++ return 0 + wait_operator_pod percona-server-mongodb-operator-555fff66c-k5bs7 + local pod=percona-server-mongodb-operator-555fff66c-k5bs7 + set +o xtrace waiting for pod/percona-server-mongodb-operator-555fff66c-k5bs7 to be ready.OK + echo 'Print operator info from log' Print operator info from log + grep 'Manager starting up' ++ get_operator_pod ++ kubectl_bin get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.kqo4EzetB9 +++ mktemp ++ local LAST_ERR=/tmp/tmp.B16sEuwHbx ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.kqo4EzetB9 ++ cat /tmp/tmp.B16sEuwHbx ++ rm /tmp/tmp.kqo4EzetB9 /tmp/tmp.B16sEuwHbx ++ return 0 + kubectl_bin logs -n psmdb-operator percona-server-mongodb-operator-555fff66c-k5bs7 ++ mktemp + local LAST_OUT=/tmp/tmp.vSIeNdipMJ ++ mktemp + local LAST_ERR=/tmp/tmp.IndRRnXmql + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl logs -n psmdb-operator percona-server-mongodb-operator-555fff66c-k5bs7 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.vSIeNdipMJ + cat /tmp/tmp.IndRRnXmql + rm /tmp/tmp.vSIeNdipMJ /tmp/tmp.IndRRnXmql + return 0 2025-12-09T23:05:41.767Z INFO setup Manager starting up {"gitCommit": "327540962b60864b2a132b40789dd392de0b23a6", "gitBranch": "PR-2129-32754096", "buildTime": "", "goVersion": "go1.25.5", "os": "linux", "arch": "amd64"} + create_namespace monitoring-2-0-3412 + local namespace=monitoring-2-0-3412 + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + destroy_chaos_mesh ++ helm list --all-namespaces --filter chaos-mesh ++ tail -n1 ++ awk '-F ' '{print $2}' ++ sed s/NAMESPACE// + local chaos_mesh_ns= + desc 'destroy chaos-mesh' + set +o xtrace ----------------------------------------------------------------------------------- destroy chaos-mesh ----------------------------------------------------------------------------------- + '[' -n '' ']' ++ kubectl get MutatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print 
$1}' + timeout 30 kubectl delete MutatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ awk '{print $1}' ++ grep chaos-mesh + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep validate-auth ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl api-resources ++ grep chaos-mesh ++ awk '{print $1}' ++ kubectl get crd ++ grep chaos-mesh.org ++ awk '{print $1}' + timeout 30 kubectl delete crd error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrolebinding ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrolebinding error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrole ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrole error: resource(s) were provided, but no name was specified + : + desc 'cleaned up all old namespaces' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up all old namespaces ----------------------------------------------------------------------------------- + kubectl_bin get ns + egrep -v '^kube-|^default|Terminating|psmdb-operator|openshift|^gke-|^gmp-|^NAME' ++ mktemp + awk '{print$1}' + '[' -n '' ']' + xargs kubectl delete ns + desc 'cleaned up old namespaces monitoring-2-0-3412' + local LAST_OUT=/tmp/tmp.Nj7oXOVnKG egrep: warning: egrep is obsolescent; using grep -E + set +o xtrace ----------------------------------------------------------------------------------- cleaned up old namespaces monitoring-2-0-3412 ----------------------------------------------------------------------------------- + kubectl_bin delete namespace monitoring-2-0-3412 --ignore-not-found ++ mktemp ++ mktemp + local LAST_ERR=/tmp/tmp.gNLxIWCZBp + local exit_status=0 + local timeout=4 ++ seq 0 2 + local LAST_OUT=/tmp/tmp.MuRxJbqCBM + for i in $(seq 0 2) + set +e + kubectl get ns ++ mktemp + local LAST_ERR=/tmp/tmp.mIJkQpfa4n + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete namespace monitoring-2-0-3412 --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Nj7oXOVnKG + cat /tmp/tmp.gNLxIWCZBp + rm /tmp/tmp.Nj7oXOVnKG /tmp/tmp.gNLxIWCZBp + return 0 error: resource(s) were provided, but no name was specified + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.MuRxJbqCBM + cat /tmp/tmp.mIJkQpfa4n + rm /tmp/tmp.MuRxJbqCBM /tmp/tmp.mIJkQpfa4n + return 0 + kubectl_bin wait --for=delete namespace monitoring-2-0-3412 ++ mktemp + local LAST_OUT=/tmp/tmp.yZOO3DvfAT ++ mktemp + local LAST_ERR=/tmp/tmp.PJwkkQ2YgF + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete namespace monitoring-2-0-3412 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.yZOO3DvfAT + cat /tmp/tmp.PJwkkQ2YgF + rm /tmp/tmp.yZOO3DvfAT /tmp/tmp.PJwkkQ2YgF + return 0 + desc 'create namespace monitoring-2-0-3412' + set +o xtrace ----------------------------------------------------------------------------------- create namespace monitoring-2-0-3412 ----------------------------------------------------------------------------------- + kubectl_bin create namespace 
monitoring-2-0-3412 ++ mktemp + local LAST_OUT=/tmp/tmp.DiNt2QNuAW ++ mktemp + local LAST_ERR=/tmp/tmp.mXmpmQP2vB + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl create namespace monitoring-2-0-3412 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.DiNt2QNuAW namespace/monitoring-2-0-3412 created + cat /tmp/tmp.mXmpmQP2vB + rm /tmp/tmp.DiNt2QNuAW /tmp/tmp.mXmpmQP2vB + return 0 + set_kube_ctx monitoring-2-0-3412 + local namespace=monitoring-2-0-3412 ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.te8IpMeRYv +++ mktemp ++ local LAST_ERR=/tmp/tmp.jIIp4alTT4 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.te8IpMeRYv ++ cat /tmp/tmp.jIIp4alTT4 ++ rm /tmp/tmp.te8IpMeRYv /tmp/tmp.jIIp4alTT4 ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-2129-32754096-2-cluster4 --namespace=monitoring-2-0-3412 ++ mktemp + local LAST_OUT=/tmp/tmp.WO8cVLEzBS ++ mktemp + local LAST_ERR=/tmp/tmp.WsHIdF5nJU + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-2129-32754096-2-cluster4 --namespace=monitoring-2-0-3412 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.WO8cVLEzBS Context "gke_cloud-dev-112233_us-central1-a_jen-psmdb-2129-32754096-2-cluster4" modified. + cat /tmp/tmp.WsHIdF5nJU + rm /tmp/tmp.WO8cVLEzBS /tmp/tmp.WsHIdF5nJU + return 0 + deploy_cert_manager + desc 'deploy cert manager' + set +o xtrace ----------------------------------------------------------------------------------- deploy cert manager ----------------------------------------------------------------------------------- + kubectl_bin create namespace cert-manager ++ mktemp + local LAST_OUT=/tmp/tmp.hoHZgZ2Nrr ++ mktemp + local LAST_ERR=/tmp/tmp.SEuB2VSnMU + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl create namespace cert-manager + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.hoHZgZ2Nrr namespace/cert-manager created + cat /tmp/tmp.SEuB2VSnMU + rm /tmp/tmp.hoHZgZ2Nrr /tmp/tmp.SEuB2VSnMU + return 0 + kubectl_bin label namespace cert-manager certmanager.k8s.io/disable-validation=true ++ mktemp + local LAST_OUT=/tmp/tmp.Xl42qZihgT ++ mktemp + local LAST_ERR=/tmp/tmp.KUqaS2PwGf + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl label namespace cert-manager certmanager.k8s.io/disable-validation=true + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Xl42qZihgT namespace/cert-manager labeled + cat /tmp/tmp.KUqaS2PwGf + rm /tmp/tmp.Xl42qZihgT /tmp/tmp.KUqaS2PwGf + return 0 + kubectl_bin apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml --validate=false ++ mktemp + local LAST_OUT=/tmp/tmp.To5Le9H0iO ++ mktemp + local LAST_ERR=/tmp/tmp.9c3AA5F1EB + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml --validate=false + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.To5Le9H0iO namespace/cert-manager configured customresourcedefinition.apiextensions.k8s.io/challenges.acme.cert-manager.io unchanged 
customresourcedefinition.apiextensions.k8s.io/orders.acme.cert-manager.io unchanged customresourcedefinition.apiextensions.k8s.io/certificaterequests.cert-manager.io unchanged customresourcedefinition.apiextensions.k8s.io/certificates.cert-manager.io unchanged customresourcedefinition.apiextensions.k8s.io/clusterissuers.cert-manager.io unchanged customresourcedefinition.apiextensions.k8s.io/issuers.cert-manager.io unchanged serviceaccount/cert-manager-cainjector created serviceaccount/cert-manager created serviceaccount/cert-manager-webhook created clusterrole.rbac.authorization.k8s.io/cert-manager-cainjector unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-issuers unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-clusterissuers unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-certificates unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-orders unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-challenges unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-ingress-shim unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-cluster-view unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-view unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-edit unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-approve:cert-manager-io unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-certificatesigningrequests unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-webhook:subjectaccessreviews unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-cainjector unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-issuers unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-clusterissuers unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-certificates unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-orders unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-challenges unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-ingress-shim unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-approve:cert-manager-io unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-certificatesigningrequests unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-webhook:subjectaccessreviews unchanged role.rbac.authorization.k8s.io/cert-manager-cainjector:leaderelection unchanged role.rbac.authorization.k8s.io/cert-manager:leaderelection unchanged role.rbac.authorization.k8s.io/cert-manager-tokenrequest created role.rbac.authorization.k8s.io/cert-manager-webhook:dynamic-serving created rolebinding.rbac.authorization.k8s.io/cert-manager-cainjector:leaderelection unchanged rolebinding.rbac.authorization.k8s.io/cert-manager:leaderelection unchanged rolebinding.rbac.authorization.k8s.io/cert-manager-tokenrequest created rolebinding.rbac.authorization.k8s.io/cert-manager-webhook:dynamic-serving created service/cert-manager-cainjector created service/cert-manager created service/cert-manager-webhook created deployment.apps/cert-manager-cainjector created deployment.apps/cert-manager created deployment.apps/cert-manager-webhook created mutatingwebhookconfiguration.admissionregistration.k8s.io/cert-manager-webhook configured 
validatingwebhookconfiguration.admissionregistration.k8s.io/cert-manager-webhook configured + cat /tmp/tmp.9c3AA5F1EB Warning: resource namespaces/cert-manager is missing the kubectl.kubernetes.io/last-applied-configuration annotation which is required by kubectl apply. kubectl apply should only be used on resources created declaratively by either kubectl create --save-config or kubectl apply. The missing annotation will be patched automatically. + rm /tmp/tmp.To5Le9H0iO /tmp/tmp.9c3AA5F1EB + return 0 + kubectl_bin -n cert-manager wait pod -l app.kubernetes.io/instance=cert-manager --for=condition=ready ++ mktemp + local LAST_OUT=/tmp/tmp.CFhlR4eA3B ++ mktemp + local LAST_ERR=/tmp/tmp.eZnS2X6RqK + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl -n cert-manager wait pod -l app.kubernetes.io/instance=cert-manager --for=condition=ready + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.CFhlR4eA3B pod/cert-manager-cainjector-5dc9c8b4f7-clgw9 condition met pod/cert-manager-df4b69479-ms5pm condition met pod/cert-manager-webhook-769bbb594d-m2h7n condition met + cat /tmp/tmp.eZnS2X6RqK + rm /tmp/tmp.CFhlR4eA3B /tmp/tmp.eZnS2X6RqK + return 0 + sleep 120 + desc 'install PMM Server' + set +o xtrace ----------------------------------------------------------------------------------- install PMM Server ----------------------------------------------------------------------------------- + deploy_pmm_server + helm uninstall monitoring Error: uninstall: Release not loaded: monitoring: release: not found + : + helm repo remove stable "stable" has been removed from your repositories + helm repo add stable https://charts.helm.sh/stable "stable" has been added to your repositories + [[ -n '' ]] + retry 10 60 helm install monitoring --set imageTag=dev-latest --set imageRepo=perconalab/pmm-server https://percona-charts.storage.googleapis.com/pmm-server-9.9.9.tgz + local max=10 + local delay=60 + shift 2 + local n=1 + helm install monitoring --set imageTag=dev-latest --set imageRepo=perconalab/pmm-server https://percona-charts.storage.googleapis.com/pmm-server-9.9.9.tgz NAME: monitoring LAST DEPLOYED: Tue Dec 9 23:08:51 2025 NAMESPACE: monitoring-2-0-3412 STATUS: deployed REVISION: 1 TEST SUITE: None NOTES: PMM server can be accessed via HTTPS (port 443) on the following DNS name from within your cluster: endpoint: https://monitoring-service.monitoring-2-0-3412.svc.cluster.local:443 login: admin password: admin + sleep 40 + kubectl_bin exec monitoring-0 -- bash -c 'ls -l /proc/*/exe 2>/dev/null| grep postgres >/dev/null' ++ mktemp + local LAST_OUT=/tmp/tmp.fUWRv6MpZk ++ mktemp + local LAST_ERR=/tmp/tmp.sMtWuTSsb4 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec monitoring-0 -- bash -c 'ls -l /proc/*/exe 2>/dev/null| grep postgres >/dev/null' + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.fUWRv6MpZk + cat /tmp/tmp.sMtWuTSsb4 error: Internal error occurred: unable to upgrade connection: container not found ("monitoring") + sleep 0 + for i in $(seq 0 2) + set +e + kubectl exec monitoring-0 -- bash -c 'ls -l /proc/*/exe 2>/dev/null| grep postgres >/dev/null' + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.fUWRv6MpZk + cat /tmp/tmp.sMtWuTSsb4 error: Internal error occurred: unable to upgrade connection: container not found ("monitoring") + sleep 4 + for i in $(seq 0 2) + set +e + kubectl exec monitoring-0 -- bash -c 'ls -l /proc/*/exe 2>/dev/null| grep postgres 
>/dev/null' + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.fUWRv6MpZk + cat /tmp/tmp.sMtWuTSsb4 error: Internal error occurred: unable to upgrade connection: container not found ("monitoring") + sleep 8 + cat /tmp/tmp.fUWRv6MpZk + cat /tmp/tmp.sMtWuTSsb4 error: Internal error occurred: unable to upgrade connection: container not found ("monitoring") + rm /tmp/tmp.fUWRv6MpZk /tmp/tmp.sMtWuTSsb4 + return 1 + echo 'Retry 0' Retry 0 + sleep 5 + let retry+=1 + '[' 1 -ge 30 ']' + kubectl_bin exec monitoring-0 -- bash -c 'ls -l /proc/*/exe 2>/dev/null| grep postgres >/dev/null' ++ mktemp + local LAST_OUT=/tmp/tmp.W6fYh3QbBK ++ mktemp + local LAST_ERR=/tmp/tmp.JvqEJ5aLnJ + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec monitoring-0 -- bash -c 'ls -l /proc/*/exe 2>/dev/null| grep postgres >/dev/null' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.W6fYh3QbBK + cat /tmp/tmp.JvqEJ5aLnJ + rm /tmp/tmp.W6fYh3QbBK /tmp/tmp.JvqEJ5aLnJ + return 0 + cluster=monitoring + desc 'create secrets and start client' + set +o xtrace ----------------------------------------------------------------------------------- create secrets and start client ----------------------------------------------------------------------------------- + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2129/e2e-tests/conf/secrets.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2129/e2e-tests/monitoring-2-0/conf/secrets.yml ++ mktemp + local LAST_OUT=/tmp/tmp.hi6QQktomy ++ mktemp + local LAST_ERR=/tmp/tmp.oz8VbqRLhz + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2129/e2e-tests/conf/secrets.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2129/e2e-tests/monitoring-2-0/conf/secrets.yml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.hi6QQktomy secret/some-users created secret/some-users unchanged + cat /tmp/tmp.oz8VbqRLhz + rm /tmp/tmp.hi6QQktomy /tmp/tmp.oz8VbqRLhz + return 0 + yq '.spec.template.spec.volumes[0].secret.secretName="monitoring-ssl"' /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2129/e2e-tests/conf/client_with_tls.yml + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.2I8iKiP9Kb ++ mktemp + local LAST_ERR=/tmp/tmp.2IxMBC6QoE + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.2I8iKiP9Kb deployment.apps/psmdb-client created + cat /tmp/tmp.2IxMBC6QoE + rm /tmp/tmp.2I8iKiP9Kb /tmp/tmp.2IxMBC6QoE + return 0 + sleep 90 + desc 'create first PSMDB cluster monitoring' + set +o xtrace ----------------------------------------------------------------------------------- create first PSMDB cluster monitoring ----------------------------------------------------------------------------------- + apply_cluster /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2129/e2e-tests/monitoring-2-0/conf/monitoring-rs0.yml + '[' -z '' ']' + cat_config /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2129/e2e-tests/monitoring-2-0/conf/monitoring-rs0.yml + kubectl_bin apply -f - ++ mktemp + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2129/e2e-tests/monitoring-2-0/conf/monitoring-rs0.yml + yq eval '(.spec | select(.image == null)).image = "perconalab/percona-server-mongodb-operator:main-mongod8.0"' + yq eval '(.spec | select(has("pmm"))).pmm.image = 
"percona/pmm-client:2.44.1-1"' + yq eval '(.spec | select(has("initImage"))).initImage = "perconalab/percona-server-mongodb-operator:PR-2129-32754096"' + local LAST_OUT=/tmp/tmp.kEENC81tSQ + yq eval '(.spec | select(has("backup"))).backup.image = "percona/percona-backup-mongodb:2.11.0"' ++ mktemp + local LAST_ERR=/tmp/tmp.cy9l6k7g31 + local exit_status=0 + local timeout=4 + yq eval '.spec.upgradeOptions.apply="Never"' ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.kEENC81tSQ perconaservermongodb.psmdb.percona.com/monitoring created + cat /tmp/tmp.cy9l6k7g31 + rm /tmp/tmp.kEENC81tSQ /tmp/tmp.cy9l6k7g31 + return 0 + wait_for_running monitoring-rs0 3 + local name=monitoring-rs0 + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=monitoring ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod monitoring-rs0-0 + local pod=monitoring-rs0-0 + set +o xtrace waiting for pod/monitoring-rs0-0 to be ready..........OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod monitoring-rs0-1 + local pod=monitoring-rs0-1 + set +o xtrace waiting for pod/monitoring-rs0-1 to be ready.............OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb monitoring -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.lWjVLdc2LF +++ mktemp ++ local LAST_ERR=/tmp/tmp.kGP7vvOjIK ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb monitoring -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.lWjVLdc2LF ++ cat /tmp/tmp.kGP7vvOjIK ++ rm /tmp/tmp.lWjVLdc2LF /tmp/tmp.kGP7vvOjIK ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod monitoring-rs0-2 + local pod=monitoring-rs0-2 + set +o xtrace waiting for pod/monitoring-rs0-2 to be ready...........OK ++ kubectl_bin get psmdb monitoring -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.jCl33Np2C3 +++ mktemp ++ local LAST_ERR=/tmp/tmp.NZE4JJiWKU ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb monitoring -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.jCl33Np2C3 ++ cat /tmp/tmp.NZE4JJiWKU ++ rm /tmp/tmp.jCl33Np2C3 /tmp/tmp.NZE4JJiWKU ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb monitoring -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.qV9KoG1iqK +++ mktemp ++ local LAST_ERR=/tmp/tmp.eMq33elGrj ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb monitoring -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.qV9KoG1iqK ++ cat /tmp/tmp.eMq33elGrj ++ rm /tmp/tmp.qV9KoG1iqK /tmp/tmp.eMq33elGrj ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness...................... 
+ desc 'check if pmm-client container is not enabled' + set +o xtrace ----------------------------------------------------------------------------------- check if pmm-client container is not enabled ----------------------------------------------------------------------------------- + compare_kubectl statefulset/monitoring-rs0 -no-pmm + local resource=statefulset/monitoring-rs0 + local postfix=-no-pmm + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2129/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-rs0-no-pmm.yml + local new_result=/tmp/tmp.DuPpQJQlN6/statefulset_monitoring-rs0.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2129/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-rs0-no-pmm-oc.yml ']' + kubectl_bin get -o yaml statefulset/monitoring-rs0 + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("monitoring-2-0-3412", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. 
== "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.1E2zs9Xnlj ++ mktemp + local LAST_ERR=/tmp/tmp.VOVsDEKPag + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get -o yaml statefulset/monitoring-rs0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.1E2zs9Xnlj + cat /tmp/tmp.VOVsDEKPag + rm /tmp/tmp.1E2zs9Xnlj /tmp/tmp.VOVsDEKPag + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.DuPpQJQlN6/statefulset_monitoring-rs0.yml + version_gt 1.22 ++ echo '1.31 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.DuPpQJQlN6/statefulset_monitoring-rs0.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.DuPpQJQlN6/statefulset_monitoring-rs0.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2129/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-rs0-no-pmm.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2129/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-rs0-no-pmm.yml /tmp/tmp.DuPpQJQlN6/statefulset_monitoring-rs0.yml + log 'compare_kubectl: statefulset/monitoring-rs0 OK' + set +o xtrace [2025-12-09T23:13:51+0000] compare_kubectl: statefulset/monitoring-rs0 OK + sleep 10 + custom_port=27019 + run_mongos 'db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})' userAdmin:userAdmin123456@monitoring-mongos.monitoring-2-0-3412 '' '' '--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' 27019 + local 'command=db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})' + local uri=userAdmin:userAdmin123456@monitoring-mongos.monitoring-2-0-3412 + local driver=mongodb + local suffix=.svc.cluster.local + local 'mongo_flag=--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' + local port=27019 + local mongo_bin=mongo ++ echo .svc.cluster.local ++ awk -F: '{print $2}' + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27019 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.gk0AtYIjO5 +++ mktemp ++ local LAST_ERR=/tmp/tmp.cX1VCT4SJb ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.gk0AtYIjO5 ++ cat /tmp/tmp.cX1VCT4SJb ++ rm /tmp/tmp.gk0AtYIjO5 /tmp/tmp.cX1VCT4SJb ++ return 0 + local client_container=psmdb-client-5cc588475d-tqnk7 + kubectl_bin exec psmdb-client-5cc588475d-tqnk7 -- bash -c 'printf '\''db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})\n'\'' | mongo mongodb://userAdmin:userAdmin123456@monitoring-mongos.monitoring-2-0-3412.svc.cluster.local:27019/admin --tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' ++ mktemp + local LAST_OUT=/tmp/tmp.VPWzrE0cxq ++ mktemp + local LAST_ERR=/tmp/tmp.XqAQT1Sfb9 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-5cc588475d-tqnk7 -- bash -c 'printf '\''db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})\n'\'' | mongo mongodb://userAdmin:userAdmin123456@monitoring-mongos.monitoring-2-0-3412.svc.cluster.local:27019/admin --tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile 
/etc/mongodb-ssl/ca.crt --tls' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.VPWzrE0cxq Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://monitoring-mongos.monitoring-2-0-3412.svc.cluster.local:27019/admin?compressors=disabled&gssapiServiceName=mongodb {"t":{"$date":"2025-12-09T23:14:04.085Z"},"s":"I", "c":"NETWORK", "id":5490002, "ctx":"thread1","msg":"Started a new thread for the timer service"} Implicit session: session { "id" : UUID("d2e15eb3-7ca0-470c-b537-3c6447613090") } Percona Server for MongoDB server version: v8.0.16-5 WARNING: shell and server versions do not match Successfully added user: { "user" : "myApp", "roles" : [ { "db" : "myApp", "role" : "readWrite" } ] } bye + cat /tmp/tmp.XqAQT1Sfb9 + rm /tmp/tmp.VPWzrE0cxq /tmp/tmp.XqAQT1Sfb9 + return 0 + run_mongos 'sh.enableSharding("myApp")' clusterAdmin:clusterAdmin123456@monitoring-mongos.monitoring-2-0-3412 '' '' '--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' 27019 + local 'command=sh.enableSharding("myApp")' + local uri=clusterAdmin:clusterAdmin123456@monitoring-mongos.monitoring-2-0-3412 + local driver=mongodb + local suffix=.svc.cluster.local + local 'mongo_flag=--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' + local port=27019 + local mongo_bin=mongo ++ echo .svc.cluster.local ++ awk -F: '{print $2}' + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27019 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.q6SnFA22iF +++ mktemp ++ local LAST_ERR=/tmp/tmp.cJ4nwtbSQ7 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.q6SnFA22iF ++ cat /tmp/tmp.cJ4nwtbSQ7 ++ rm /tmp/tmp.q6SnFA22iF /tmp/tmp.cJ4nwtbSQ7 ++ return 0 + local client_container=psmdb-client-5cc588475d-tqnk7 + kubectl_bin exec psmdb-client-5cc588475d-tqnk7 -- bash -c 'printf '\''sh.enableSharding("myApp")\n'\'' | mongo mongodb://clusterAdmin:clusterAdmin123456@monitoring-mongos.monitoring-2-0-3412.svc.cluster.local:27019/admin --tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' ++ mktemp + local LAST_OUT=/tmp/tmp.WS5R26dSYE ++ mktemp + local LAST_ERR=/tmp/tmp.NV3xCCXYGC + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-5cc588475d-tqnk7 -- bash -c 'printf '\''sh.enableSharding("myApp")\n'\'' | mongo mongodb://clusterAdmin:clusterAdmin123456@monitoring-mongos.monitoring-2-0-3412.svc.cluster.local:27019/admin --tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.WS5R26dSYE Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://monitoring-mongos.monitoring-2-0-3412.svc.cluster.local:27019/admin?compressors=disabled&gssapiServiceName=mongodb {"t":{"$date":"2025-12-09T23:14:06.897Z"},"s":"I", "c":"NETWORK", "id":5490002, "ctx":"thread1","msg":"Started a new thread for the timer service"} Implicit session: session { "id" : UUID("b003c6c1-ece2-4b2f-a617-84f8ff1b95f1") } Percona Server for MongoDB server version: v8.0.16-5 WARNING: shell and server versions do not match { "ok" : 1, "$clusterTime" : { "clusterTime" : Timestamp(1765322046, 9), "signature" : { "hash" : 
BinData(0,"WSYDTSr2cmMZytpbWmi5es1yMKo="), "keyId" : NumberLong("7582000102290489351") } }, "operationTime" : Timestamp(1765322046, 6) } bye + cat /tmp/tmp.NV3xCCXYGC + rm /tmp/tmp.WS5R26dSYE /tmp/tmp.NV3xCCXYGC + return 0 + insert_data_mongos 100500 myApp '--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' 27019 + local data=100500 + local db_name=myApp + local 'flags=--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' + local port=27019 + run_mongos 'use myApp\n db.test.insert({ x: 100500 })' myApp:myPass@monitoring-mongos.monitoring-2-0-3412 '' '' '--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' 27019 + local 'command=use myApp\n db.test.insert({ x: 100500 })' + local uri=myApp:myPass@monitoring-mongos.monitoring-2-0-3412 + local driver=mongodb + local suffix=.svc.cluster.local + local 'mongo_flag=--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' + local port=27019 + local mongo_bin=mongo ++ echo .svc.cluster.local ++ awk -F: '{print $2}' + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27019 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.FcVGlUn5VO +++ mktemp ++ local LAST_ERR=/tmp/tmp.THCv3aFbbi ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.FcVGlUn5VO ++ cat /tmp/tmp.THCv3aFbbi ++ rm /tmp/tmp.FcVGlUn5VO /tmp/tmp.THCv3aFbbi ++ return 0 + local client_container=psmdb-client-5cc588475d-tqnk7 + kubectl_bin exec psmdb-client-5cc588475d-tqnk7 -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb://myApp:myPass@monitoring-mongos.monitoring-2-0-3412.svc.cluster.local:27019/admin --tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' ++ mktemp + local LAST_OUT=/tmp/tmp.y8SsAAYClw ++ mktemp + local LAST_ERR=/tmp/tmp.3JR2H3mgjc + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-5cc588475d-tqnk7 -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb://myApp:myPass@monitoring-mongos.monitoring-2-0-3412.svc.cluster.local:27019/admin --tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.y8SsAAYClw Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://monitoring-mongos.monitoring-2-0-3412.svc.cluster.local:27019/admin?compressors=disabled&gssapiServiceName=mongodb {"t":{"$date":"2025-12-09T23:14:09.756Z"},"s":"I", "c":"NETWORK", "id":5490002, "ctx":"thread1","msg":"Started a new thread for the timer service"} Implicit session: session { "id" : UUID("5febf53c-7618-48e2-aba5-ccf988edac78") } Percona Server for MongoDB server version: v8.0.16-5 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.3JR2H3mgjc + rm /tmp/tmp.y8SsAAYClw /tmp/tmp.3JR2H3mgjc + return 0 + insert_data_mongos 100600 myApp '--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' 27019 + local data=100600 + local db_name=myApp + local 'flags=--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' + local port=27019 + run_mongos 'use 
myApp\n db.test.insert({ x: 100600 })' myApp:myPass@monitoring-mongos.monitoring-2-0-3412 '' '' '--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' 27019 + local 'command=use myApp\n db.test.insert({ x: 100600 })' + local uri=myApp:myPass@monitoring-mongos.monitoring-2-0-3412 + local driver=mongodb + local suffix=.svc.cluster.local + local 'mongo_flag=--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' + local port=27019 + local mongo_bin=mongo ++ echo .svc.cluster.local ++ awk -F: '{print $2}' + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27019 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.si7qgpyTqV +++ mktemp ++ local LAST_ERR=/tmp/tmp.yk4o57hY9u ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.si7qgpyTqV ++ cat /tmp/tmp.yk4o57hY9u ++ rm /tmp/tmp.si7qgpyTqV /tmp/tmp.yk4o57hY9u ++ return 0 + local client_container=psmdb-client-5cc588475d-tqnk7 + kubectl_bin exec psmdb-client-5cc588475d-tqnk7 -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100600 })\n'\'' | mongo mongodb://myApp:myPass@monitoring-mongos.monitoring-2-0-3412.svc.cluster.local:27019/admin --tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' ++ mktemp + local LAST_OUT=/tmp/tmp.Edj12MQsvp ++ mktemp + local LAST_ERR=/tmp/tmp.7iObKoDXjN + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-5cc588475d-tqnk7 -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100600 })\n'\'' | mongo mongodb://myApp:myPass@monitoring-mongos.monitoring-2-0-3412.svc.cluster.local:27019/admin --tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Edj12MQsvp Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://monitoring-mongos.monitoring-2-0-3412.svc.cluster.local:27019/admin?compressors=disabled&gssapiServiceName=mongodb {"t":{"$date":"2025-12-09T23:14:12.638Z"},"s":"I", "c":"NETWORK", "id":5490002, "ctx":"thread1","msg":"Started a new thread for the timer service"} Implicit session: session { "id" : UUID("c9f6e7f3-b738-4f83-bf9f-c85095f1eda1") } Percona Server for MongoDB server version: v8.0.16-5 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.7iObKoDXjN + rm /tmp/tmp.Edj12MQsvp /tmp/tmp.7iObKoDXjN + return 0 + insert_data_mongos 100700 myApp '--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' 27019 + local data=100700 + local db_name=myApp + local 'flags=--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' + local port=27019 + run_mongos 'use myApp\n db.test.insert({ x: 100700 })' myApp:myPass@monitoring-mongos.monitoring-2-0-3412 '' '' '--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' 27019 + local 'command=use myApp\n db.test.insert({ x: 100700 })' + local uri=myApp:myPass@monitoring-mongos.monitoring-2-0-3412 + local driver=mongodb + local suffix=.svc.cluster.local + local 'mongo_flag=--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' + local port=27019 + local mongo_bin=mongo ++ echo 
.svc.cluster.local ++ awk -F: '{print $2}' + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27019 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.TAkxqAi0g2 +++ mktemp ++ local LAST_ERR=/tmp/tmp.ZBXNDELKyA ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.TAkxqAi0g2 ++ cat /tmp/tmp.ZBXNDELKyA ++ rm /tmp/tmp.TAkxqAi0g2 /tmp/tmp.ZBXNDELKyA ++ return 0 + local client_container=psmdb-client-5cc588475d-tqnk7 + kubectl_bin exec psmdb-client-5cc588475d-tqnk7 -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100700 })\n'\'' | mongo mongodb://myApp:myPass@monitoring-mongos.monitoring-2-0-3412.svc.cluster.local:27019/admin --tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' ++ mktemp + local LAST_OUT=/tmp/tmp.Suja3VLmnj ++ mktemp + local LAST_ERR=/tmp/tmp.MkukM0H8fy + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-5cc588475d-tqnk7 -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100700 })\n'\'' | mongo mongodb://myApp:myPass@monitoring-mongos.monitoring-2-0-3412.svc.cluster.local:27019/admin --tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Suja3VLmnj Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://monitoring-mongos.monitoring-2-0-3412.svc.cluster.local:27019/admin?compressors=disabled&gssapiServiceName=mongodb {"t":{"$date":"2025-12-09T23:14:15.522Z"},"s":"I", "c":"NETWORK", "id":5490002, "ctx":"thread1","msg":"Started a new thread for the timer service"} Implicit session: session { "id" : UUID("8b7319ab-abfb-407c-9b2d-48881656415a") } Percona Server for MongoDB server version: v8.0.16-5 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.MkukM0H8fy + rm /tmp/tmp.Suja3VLmnj /tmp/tmp.MkukM0H8fy + return 0 + desc 'add PMM_SERVER_API_KEY for secret some-users' + set +o xtrace ----------------------------------------------------------------------------------- add PMM_SERVER_API_KEY for secret some-users ----------------------------------------------------------------------------------- ++ jq .key +++ get_service_endpoint monitoring-service +++ local service=monitoring-service ++++ kubectl_bin get service/monitoring-service -o json ++++ jq '.status.loadBalancer.ingress[].hostname' ++++ sed -e 's/^"//; s/"$//;' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.ohUK2h10A4 +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.twKvIhUUA6 ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o json ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.ohUK2h10A4 ++++ cat /tmp/tmp.twKvIhUUA6 ++++ rm /tmp/tmp.ohUK2h10A4 /tmp/tmp.twKvIhUUA6 ++++ return 0 +++ local hostname=null +++ '[' -n null -a null '!=' null ']' ++++ kubectl_bin get service/monitoring-service -o json ++++ jq '.status.loadBalancer.ingress[].ip' +++++ mktemp ++++ sed -e 's/^"//; s/"$//;' ++++ local LAST_OUT=/tmp/tmp.xbHWhzHD3Z +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.7v1gvE09Qp ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in 
$(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o json ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.xbHWhzHD3Z ++++ cat /tmp/tmp.7v1gvE09Qp ++++ rm /tmp/tmp.xbHWhzHD3Z /tmp/tmp.7v1gvE09Qp ++++ return 0 +++ local ip=35.202.20.27 +++ '[' -n 35.202.20.27 -a 35.202.20.27 '!=' null ']' +++ echo 35.202.20.27 +++ return ++ curl --insecure -X POST -H 'Content-Type: application/json' -d '{"name":"operator", "role": "Admin"}' https://admin:admin@35.202.20.27/graph/api/auth/keys % Total % Received % Xferd Average Speed Time Time Time Current Dload Upload Total Spent Left Speed 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0 100 155 100 119 100 36 270 81 --:--:-- --:--:-- --:--:-- 352 + API_KEY='"eyJrIjoib1d4Wm1QMmVpbEc2SmxVWDZhR0NHSWtRbU5PS3oyRjYiLCJuIjoib3BlcmF0b3IiLCJpZCI6MX0="' + kubectl_bin patch secret some-users --type merge --patch '{"stringData": {"PMM_SERVER_API_KEY": "eyJrIjoib1d4Wm1QMmVpbEc2SmxVWDZhR0NHSWtRbU5PS3oyRjYiLCJuIjoib3BlcmF0b3IiLCJpZCI6MX0="}}' ++ mktemp + local LAST_OUT=/tmp/tmp.bgSsWp5Q91 ++ mktemp + local LAST_ERR=/tmp/tmp.WCpjiSKi5w + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl patch secret some-users --type merge --patch '{"stringData": {"PMM_SERVER_API_KEY": "eyJrIjoib1d4Wm1QMmVpbEc2SmxVWDZhR0NHSWtRbU5PS3oyRjYiLCJuIjoib3BlcmF0b3IiLCJpZCI6MX0="}}' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.bgSsWp5Q91 secret/some-users patched + cat /tmp/tmp.WCpjiSKi5w + rm /tmp/tmp.bgSsWp5Q91 /tmp/tmp.WCpjiSKi5w + return 0 + desc 'check if all 3 Pods started' + set +o xtrace ----------------------------------------------------------------------------------- check if all 3 Pods started ----------------------------------------------------------------------------------- + wait_for_running monitoring-rs0 3 + local name=monitoring-rs0 + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=monitoring ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod monitoring-rs0-0 + local pod=monitoring-rs0-0 + set +o xtrace waiting for pod/monitoring-rs0-0 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod monitoring-rs0-1 + local pod=monitoring-rs0-1 + set +o xtrace waiting for pod/monitoring-rs0-1 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb monitoring -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.M19uFbbEYw +++ mktemp ++ local LAST_ERR=/tmp/tmp.Konexzm2h9 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb monitoring -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.M19uFbbEYw ++ cat /tmp/tmp.Konexzm2h9 ++ rm /tmp/tmp.M19uFbbEYw /tmp/tmp.Konexzm2h9 ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod monitoring-rs0-2 + local pod=monitoring-rs0-2 + set +o xtrace waiting for pod/monitoring-rs0-2 to be ready.OK ++ kubectl_bin get psmdb monitoring -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.kQqRFRKtiG +++ mktemp ++ local LAST_ERR=/tmp/tmp.28N2WGNQou ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb monitoring -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' ++ exit_status=0 ++ set -e 
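For reference, the "add PMM_SERVER_API_KEY for secret some-users" step traced above reduces to two calls: create a Grafana API key through the PMM server's REST endpoint, then patch it into the some-users Secret so the operator can hand it to pmm-client. A minimal sketch, assuming the default admin:admin credentials and the LoadBalancer IP resolved by get_service_endpoint (the PMM_HOST variable is illustrative and not part of the test script):

  PMM_HOST=$(kubectl get service/monitoring-service -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
  API_KEY=$(curl -sk -X POST -H 'Content-Type: application/json' \
      -d '{"name":"operator", "role": "Admin"}' \
      "https://admin:admin@${PMM_HOST}/graph/api/auth/keys" | jq -r .key)
  kubectl patch secret some-users --type merge \
      -p "{\"stringData\": {\"PMM_SERVER_API_KEY\": \"${API_KEY}\"}}"

The test script keeps jq's quoting (jq .key) and splices the quoted value straight into the patch JSON, whereas the sketch uses jq -r and re-adds the quotes explicitly; either way the Secret ends up holding the raw key string.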
++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.kQqRFRKtiG ++ cat /tmp/tmp.28N2WGNQou ++ rm /tmp/tmp.kQqRFRKtiG /tmp/tmp.28N2WGNQou ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb monitoring -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.HuIlZM9N5W +++ mktemp ++ local LAST_ERR=/tmp/tmp.TUqhLEvPrf ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb monitoring -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.HuIlZM9N5W ++ cat /tmp/tmp.TUqhLEvPrf ++ rm /tmp/tmp.HuIlZM9N5W /tmp/tmp.TUqhLEvPrf ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness................................................................................................................................................... + sleep 90 + desc 'check if pmm-client container enabled' + set +o xtrace ----------------------------------------------------------------------------------- check if pmm-client container enabled ----------------------------------------------------------------------------------- + compare_kubectl statefulset/monitoring-rs0 + local resource=statefulset/monitoring-rs0 + local postfix= + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2129/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-rs0.yml + local new_result=/tmp/tmp.DuPpQJQlN6/statefulset_monitoring-rs0.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2129/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-rs0-oc.yml ']' + kubectl_bin get -o yaml statefulset/monitoring-rs0 + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. 
| select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("monitoring-2-0-3412", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. == "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.uCC7ipI2Vj ++ mktemp + local LAST_ERR=/tmp/tmp.9HLSEvAoqy + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get -o yaml statefulset/monitoring-rs0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.uCC7ipI2Vj + cat /tmp/tmp.9HLSEvAoqy + rm /tmp/tmp.uCC7ipI2Vj /tmp/tmp.9HLSEvAoqy + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.DuPpQJQlN6/statefulset_monitoring-rs0.yml + version_gt 1.22 ++ echo '1.31 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.DuPpQJQlN6/statefulset_monitoring-rs0.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.DuPpQJQlN6/statefulset_monitoring-rs0.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2129/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-rs0.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2129/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-rs0.yml /tmp/tmp.DuPpQJQlN6/statefulset_monitoring-rs0.yml + log 'compare_kubectl: statefulset/monitoring-rs0 OK' + set +o xtrace [2025-12-09T23:21:12+0000] compare_kubectl: statefulset/monitoring-rs0 OK + compare_kubectl service/monitoring-rs0 + local resource=service/monitoring-rs0 + local postfix= + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2129/e2e-tests/monitoring-2-0/compare/service_monitoring-rs0.yml + local new_result=/tmp/tmp.DuPpQJQlN6/service_monitoring-rs0.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2129/e2e-tests/monitoring-2-0/compare/service_monitoring-rs0-oc.yml ']' + kubectl_bin get -o yaml service/monitoring-rs0 + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. 
| select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("monitoring-2-0-3412", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. == "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.wckDMV9P0K ++ mktemp + local LAST_ERR=/tmp/tmp.LkosIFjbHj + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get -o yaml service/monitoring-rs0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.wckDMV9P0K + cat /tmp/tmp.LkosIFjbHj + rm /tmp/tmp.wckDMV9P0K /tmp/tmp.LkosIFjbHj + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.DuPpQJQlN6/service_monitoring-rs0.yml + version_gt 1.22 ++ bc -l ++ echo '1.31 >= 1.22' + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.DuPpQJQlN6/service_monitoring-rs0.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.DuPpQJQlN6/service_monitoring-rs0.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2129/e2e-tests/monitoring-2-0/compare/service_monitoring-rs0.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2129/e2e-tests/monitoring-2-0/compare/service_monitoring-rs0.yml /tmp/tmp.DuPpQJQlN6/service_monitoring-rs0.yml + log 'compare_kubectl: service/monitoring-rs0 OK' + set +o xtrace [2025-12-09T23:21:12+0000] compare_kubectl: service/monitoring-rs0 OK + compare_kubectl service/monitoring-mongos + local resource=service/monitoring-mongos + local postfix= + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2129/e2e-tests/monitoring-2-0/compare/service_monitoring-mongos.yml + local new_result=/tmp/tmp.DuPpQJQlN6/service_monitoring-mongos.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2129/e2e-tests/monitoring-2-0/compare/service_monitoring-mongos-oc.yml ']' + kubectl_bin get -o yaml service/monitoring-mongos + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. 
| select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("monitoring-2-0-3412", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. == "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.Fi7HmSbsLC ++ mktemp + local LAST_ERR=/tmp/tmp.xVcfRrt3rP + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get -o yaml service/monitoring-mongos + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Fi7HmSbsLC + cat /tmp/tmp.xVcfRrt3rP + rm /tmp/tmp.Fi7HmSbsLC /tmp/tmp.xVcfRrt3rP + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.DuPpQJQlN6/service_monitoring-mongos.yml + version_gt 1.22 ++ echo '1.31 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.DuPpQJQlN6/service_monitoring-mongos.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.DuPpQJQlN6/service_monitoring-mongos.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2129/e2e-tests/monitoring-2-0/compare/service_monitoring-mongos.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2129/e2e-tests/monitoring-2-0/compare/service_monitoring-mongos.yml /tmp/tmp.DuPpQJQlN6/service_monitoring-mongos.yml + log 'compare_kubectl: service/monitoring-mongos OK' + set +o xtrace [2025-12-09T23:21:14+0000] compare_kubectl: service/monitoring-mongos OK + compare_kubectl statefulset/monitoring-cfg + local resource=statefulset/monitoring-cfg + local postfix= + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2129/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-cfg.yml + local new_result=/tmp/tmp.DuPpQJQlN6/statefulset_monitoring-cfg.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2129/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-cfg-oc.yml ']' + kubectl_bin get -o yaml statefulset/monitoring-cfg + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. 
| select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("monitoring-2-0-3412", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. 
== "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.Grzs6Nusi3 ++ mktemp + local LAST_ERR=/tmp/tmp.xj0YKfSMPg + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get -o yaml statefulset/monitoring-cfg + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Grzs6Nusi3 + cat /tmp/tmp.xj0YKfSMPg + rm /tmp/tmp.Grzs6Nusi3 /tmp/tmp.xj0YKfSMPg + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.DuPpQJQlN6/statefulset_monitoring-cfg.yml + version_gt 1.22 ++ echo '1.31 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.DuPpQJQlN6/statefulset_monitoring-cfg.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.DuPpQJQlN6/statefulset_monitoring-cfg.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2129/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-cfg.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2129/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-cfg.yml /tmp/tmp.DuPpQJQlN6/statefulset_monitoring-cfg.yml + log 'compare_kubectl: statefulset/monitoring-cfg OK' + set +o xtrace [2025-12-09T23:21:14+0000] compare_kubectl: statefulset/monitoring-cfg OK + compare_kubectl statefulset/monitoring-mongos + local resource=statefulset/monitoring-mongos + local postfix= + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2129/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-mongos.yml + local new_result=/tmp/tmp.DuPpQJQlN6/statefulset_monitoring-mongos.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2129/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-mongos-oc.yml ']' + kubectl_bin get -o yaml statefulset/monitoring-mongos + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. 
| select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("monitoring-2-0-3412", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. == "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.HrRCBGjgV5 ++ mktemp + local LAST_ERR=/tmp/tmp.xFVNT4WVH0 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get -o yaml statefulset/monitoring-mongos + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.HrRCBGjgV5 + cat /tmp/tmp.xFVNT4WVH0 + rm /tmp/tmp.HrRCBGjgV5 /tmp/tmp.xFVNT4WVH0 + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.DuPpQJQlN6/statefulset_monitoring-mongos.yml + version_gt 1.22 ++ echo '1.31 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.DuPpQJQlN6/statefulset_monitoring-mongos.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.DuPpQJQlN6/statefulset_monitoring-mongos.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2129/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-mongos.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2129/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-mongos.yml /tmp/tmp.DuPpQJQlN6/statefulset_monitoring-mongos.yml + log 'compare_kubectl: statefulset/monitoring-mongos OK' + set +o xtrace [2025-12-09T23:21:15+0000] compare_kubectl: statefulset/monitoring-mongos OK + desc 'check mongod metrics' + set +o xtrace ----------------------------------------------------------------------------------- check mongod metrics ----------------------------------------------------------------------------------- + get_metric_values node_boot_time_seconds monitoring-2-0-3412-monitoring-rs0-1 admin:admin + local metric=node_boot_time_seconds + local instance=monitoring-2-0-3412-monitoring-rs0-1 + local user_pass=admin:admin ++ /usr/sbin/date -u +%s -d '-1 minute' + local start=1765322415 ++ /usr/sbin/date -u +%s + local end=1765322475 ++ get_service_endpoint monitoring-service ++ local service=monitoring-service +++ kubectl_bin get service/monitoring-service -o json +++ jq '.status.loadBalancer.ingress[].hostname' +++ sed -e 's/^"//; s/"$//;' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.hUZpHkkS6L ++++ mktemp +++ local LAST_ERR=/tmp/tmp.CvZW4dsWUz +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.hUZpHkkS6L +++ cat /tmp/tmp.CvZW4dsWUz +++ rm /tmp/tmp.hUZpHkkS6L /tmp/tmp.CvZW4dsWUz +++ return 0 ++ local hostname=null ++ '[' -n null -a null '!=' null ']' +++ kubectl_bin get service/monitoring-service -o json +++ jq '.status.loadBalancer.ingress[].ip' +++ sed -e 's/^"//; s/"$//;' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.RbpG7bdCGk ++++ mktemp +++ local LAST_ERR=/tmp/tmp.aT5HrYhfcm +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get service/monitoring-service -o 
json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.RbpG7bdCGk +++ cat /tmp/tmp.aT5HrYhfcm +++ rm /tmp/tmp.RbpG7bdCGk /tmp/tmp.aT5HrYhfcm +++ return 0 ++ local ip=35.202.20.27 ++ '[' -n 35.202.20.27 -a 35.202.20.27 '!=' null ']' ++ echo 35.202.20.27 ++ return + local endpoint=35.202.20.27 + jq '.data.result[0].values[][1]' + curl -s -k 'https://admin:admin@35.202.20.27/graph/api/datasources/proxy/1/api/v1/query_range?query=min%28node_boot_time_seconds%7Bnode_name%3D%7E%22monitoring-2-0-3412-monitoring-rs0-1%22%7d%20or%20node_boot_time_seconds%7Bnode_name%3D%7E%22monitoring-2-0-3412-monitoring-rs0-1%22%7D%29&start=1765322415&end=1765322475&step=60' + grep '^"[0-9]' "1765315781" "1765315781" + get_metric_values mongodb_connections monitoring-2-0-3412-monitoring-rs0-1 admin:admin + local metric=mongodb_connections + local instance=monitoring-2-0-3412-monitoring-rs0-1 + local user_pass=admin:admin ++ /usr/sbin/date -u +%s -d '-1 minute' + local start=1765322418 ++ /usr/sbin/date -u +%s + local end=1765322478 ++ get_service_endpoint monitoring-service ++ local service=monitoring-service +++ kubectl_bin get service/monitoring-service -o json +++ jq '.status.loadBalancer.ingress[].hostname' +++ sed -e 's/^"//; s/"$//;' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.8jZYfsummS ++++ mktemp +++ local LAST_ERR=/tmp/tmp.FBLjcTsUL0 +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.8jZYfsummS +++ cat /tmp/tmp.FBLjcTsUL0 +++ rm /tmp/tmp.8jZYfsummS /tmp/tmp.FBLjcTsUL0 +++ return 0 ++ local hostname=null ++ '[' -n null -a null '!=' null ']' +++ kubectl_bin get service/monitoring-service -o json +++ jq '.status.loadBalancer.ingress[].ip' +++ sed -e 's/^"//; s/"$//;' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.3rioukpER5 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.uoRYJtoUkX +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.3rioukpER5 +++ cat /tmp/tmp.uoRYJtoUkX +++ rm /tmp/tmp.3rioukpER5 /tmp/tmp.uoRYJtoUkX +++ return 0 ++ local ip=35.202.20.27 ++ '[' -n 35.202.20.27 -a 35.202.20.27 '!=' null ']' ++ echo 35.202.20.27 ++ return + local endpoint=35.202.20.27 + curl -s -k 'https://admin:admin@35.202.20.27/graph/api/datasources/proxy/1/api/v1/query_range?query=min%28mongodb_connections%7Bnode_name%3D%7E%22monitoring-2-0-3412-monitoring-rs0-1%22%7d%20or%20mongodb_connections%7Bnode_name%3D%7E%22monitoring-2-0-3412-monitoring-rs0-1%22%7D%29&start=1765322418&end=1765322478&step=60' + jq '.data.result[0].values[][1]' + grep '^"[0-9]' "0" "0" + desc 'check mongo config metrics' + set +o xtrace ----------------------------------------------------------------------------------- check mongo config metrics ----------------------------------------------------------------------------------- + get_metric_values node_boot_time_seconds monitoring-2-0-3412-monitoring-cfg-1 admin:admin + local metric=node_boot_time_seconds + local instance=monitoring-2-0-3412-monitoring-cfg-1 + local user_pass=admin:admin ++ /usr/sbin/date -u +%s -d '-1 minute' + local start=1765322421 ++ /usr/sbin/date -u +%s + local end=1765322481 ++ get_service_endpoint monitoring-service ++ local service=monitoring-service +++ kubectl_bin get service/monitoring-service -o json +++ jq 
'.status.loadBalancer.ingress[].hostname' +++ sed -e 's/^"//; s/"$//;' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.h2x4VYMbP9 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.oFlwOLFska +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.h2x4VYMbP9 +++ cat /tmp/tmp.oFlwOLFska +++ rm /tmp/tmp.h2x4VYMbP9 /tmp/tmp.oFlwOLFska +++ return 0 ++ local hostname=null ++ '[' -n null -a null '!=' null ']' +++ kubectl_bin get service/monitoring-service -o json +++ jq '.status.loadBalancer.ingress[].ip' +++ sed -e 's/^"//; s/"$//;' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.YN6LApRGFc ++++ mktemp +++ local LAST_ERR=/tmp/tmp.YHLNtaoVu7 +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.YN6LApRGFc +++ cat /tmp/tmp.YHLNtaoVu7 +++ rm /tmp/tmp.YN6LApRGFc /tmp/tmp.YHLNtaoVu7 +++ return 0 ++ local ip=35.202.20.27 ++ '[' -n 35.202.20.27 -a 35.202.20.27 '!=' null ']' ++ echo 35.202.20.27 ++ return + local endpoint=35.202.20.27 + curl -s -k 'https://admin:admin@35.202.20.27/graph/api/datasources/proxy/1/api/v1/query_range?query=min%28node_boot_time_seconds%7Bnode_name%3D%7E%22monitoring-2-0-3412-monitoring-cfg-1%22%7d%20or%20node_boot_time_seconds%7Bnode_name%3D%7E%22monitoring-2-0-3412-monitoring-cfg-1%22%7D%29&start=1765322421&end=1765322481&step=60' + jq '.data.result[0].values[][1]' + grep '^"[0-9]' "1765315779" "1765315779" + get_metric_values mongodb_connections monitoring-2-0-3412-monitoring-cfg-1 admin:admin + local metric=mongodb_connections + local instance=monitoring-2-0-3412-monitoring-cfg-1 + local user_pass=admin:admin ++ /usr/sbin/date -u +%s -d '-1 minute' + local start=1765322423 ++ /usr/sbin/date -u +%s + local end=1765322483 ++ get_service_endpoint monitoring-service ++ local service=monitoring-service +++ kubectl_bin get service/monitoring-service -o json +++ jq '.status.loadBalancer.ingress[].hostname' +++ sed -e 's/^"//; s/"$//;' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.4jY0yNVwBM ++++ mktemp +++ local LAST_ERR=/tmp/tmp.lLsKOZyuyf +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.4jY0yNVwBM +++ cat /tmp/tmp.lLsKOZyuyf +++ rm /tmp/tmp.4jY0yNVwBM /tmp/tmp.lLsKOZyuyf +++ return 0 ++ local hostname=null ++ '[' -n null -a null '!=' null ']' +++ kubectl_bin get service/monitoring-service -o json +++ jq '.status.loadBalancer.ingress[].ip' +++ sed -e 's/^"//; s/"$//;' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.Bmg1KQB7oA ++++ mktemp +++ local LAST_ERR=/tmp/tmp.yifPOMZT9L +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.Bmg1KQB7oA +++ cat /tmp/tmp.yifPOMZT9L +++ rm /tmp/tmp.Bmg1KQB7oA /tmp/tmp.yifPOMZT9L +++ return 0 ++ local ip=35.202.20.27 ++ '[' -n 35.202.20.27 -a 35.202.20.27 '!=' null ']' ++ echo 35.202.20.27 ++ return + local endpoint=35.202.20.27 + jq '.data.result[0].values[][1]' + curl -s -k 
'https://admin:admin@35.202.20.27/graph/api/datasources/proxy/1/api/v1/query_range?query=min%28mongodb_connections%7Bnode_name%3D%7E%22monitoring-2-0-3412-monitoring-cfg-1%22%7d%20or%20mongodb_connections%7Bnode_name%3D%7E%22monitoring-2-0-3412-monitoring-cfg-1%22%7D%29&start=1765322423&end=1765322483&step=60' + grep '^"[0-9]' "0" "0" + desc 'check mongos metrics' + set +o xtrace ----------------------------------------------------------------------------------- check mongos metrics ----------------------------------------------------------------------------------- ++ kubectl get pod -l app.kubernetes.io/component=mongos -o 'jsonpath={.items[0].metadata.name}' + MONGOS_POD_NAME=monitoring-mongos-0 + get_metric_values node_boot_time_seconds monitoring-2-0-3412-monitoring-mongos-0 admin:admin + local metric=node_boot_time_seconds + local instance=monitoring-2-0-3412-monitoring-mongos-0 + local user_pass=admin:admin ++ /usr/sbin/date -u +%s -d '-1 minute' + local start=1765322426 ++ /usr/sbin/date -u +%s + local end=1765322486 ++ get_service_endpoint monitoring-service ++ local service=monitoring-service +++ kubectl_bin get service/monitoring-service -o json +++ jq '.status.loadBalancer.ingress[].hostname' +++ sed -e 's/^"//; s/"$//;' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.cPUoqV6NZH ++++ mktemp +++ local LAST_ERR=/tmp/tmp.ZAuFmUmPMx +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.cPUoqV6NZH +++ cat /tmp/tmp.ZAuFmUmPMx +++ rm /tmp/tmp.cPUoqV6NZH /tmp/tmp.ZAuFmUmPMx +++ return 0 ++ local hostname=null ++ '[' -n null -a null '!=' null ']' +++ kubectl_bin get service/monitoring-service -o json +++ jq '.status.loadBalancer.ingress[].ip' +++ sed -e 's/^"//; s/"$//;' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.9vHTipNgna ++++ mktemp +++ local LAST_ERR=/tmp/tmp.LqN6WfAZBv +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.9vHTipNgna +++ cat /tmp/tmp.LqN6WfAZBv +++ rm /tmp/tmp.9vHTipNgna /tmp/tmp.LqN6WfAZBv +++ return 0 ++ local ip=35.202.20.27 ++ '[' -n 35.202.20.27 -a 35.202.20.27 '!=' null ']' ++ echo 35.202.20.27 ++ return + local endpoint=35.202.20.27 + jq '.data.result[0].values[][1]' + grep '^"[0-9]' + curl -s -k 'https://admin:admin@35.202.20.27/graph/api/datasources/proxy/1/api/v1/query_range?query=min%28node_boot_time_seconds%7Bnode_name%3D%7E%22monitoring-2-0-3412-monitoring-mongos-0%22%7d%20or%20node_boot_time_seconds%7Bnode_name%3D%7E%22monitoring-2-0-3412-monitoring-mongos-0%22%7D%29&start=1765322426&end=1765322486&step=60' "1765315781" "1765315781" + sleep 90 + desc 'check QAN data' + set +o xtrace ----------------------------------------------------------------------------------- check QAN data ----------------------------------------------------------------------------------- + get_qan_values mongodb dev-mongod admin:admin + local service_type=mongodb + local environment=dev-mongod + local user_pass=admin:admin + local start + local end + local endpoint ++ /usr/sbin/date -u -d '-12 hour' +%Y-%m-%dT%H:%M:%S%:z + start=2025-12-09T11:22:59+00:00 ++ /usr/sbin/date -u +%Y-%m-%dT%H:%M:%S%:z + end=2025-12-09T23:22:59+00:00 ++ get_service_endpoint monitoring-service ++ local service=monitoring-service +++ kubectl_bin get 
service/monitoring-service -o json +++ jq '.status.loadBalancer.ingress[].hostname' +++ sed -e 's/^"//; s/"$//;' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.0BjjvzhDho ++++ mktemp +++ local LAST_ERR=/tmp/tmp.QGH7B87hTw +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.0BjjvzhDho +++ cat /tmp/tmp.QGH7B87hTw +++ rm /tmp/tmp.0BjjvzhDho /tmp/tmp.QGH7B87hTw +++ return 0 ++ local hostname=null ++ '[' -n null -a null '!=' null ']' +++ kubectl_bin get service/monitoring-service -o json +++ jq '.status.loadBalancer.ingress[].ip' +++ sed -e 's/^"//; s/"$//;' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.OOsctKSLv7 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.AgB4j7PEE1 +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.OOsctKSLv7 +++ cat /tmp/tmp.AgB4j7PEE1 +++ rm /tmp/tmp.OOsctKSLv7 /tmp/tmp.AgB4j7PEE1 +++ return 0 ++ local ip=35.202.20.27 ++ '[' -n 35.202.20.27 -a 35.202.20.27 '!=' null ']' ++ echo 35.202.20.27 ++ return + endpoint=35.202.20.27 + cat + local response + retry=0 ++ curl -s -k -XPOST -d @payload.json https://admin:admin@35.202.20.27/v0/qan/GetReport ++ jq '.rows[].sparkline' + [[ [ { "time_frame": 360, "timestamp": "2025-12-09T23:22:00Z" }, { "point": 1, "time_frame": 360, "timestamp": "2025-12-09T23:16:00Z" }, { "point": 2, "time_frame": 360, "timestamp": "2025-12-09T23:10:00Z" }, { "point": 3, "time_frame": 360, "timestamp": "2025-12-09T23:04:00Z" }, { "point": 4, "time_frame": 360, "timestamp": "2025-12-09T22:58:00Z" }, { "point": 5, "time_frame": 360, "timestamp": "2025-12-09T22:52:00Z" }, { "point": 6, "time_frame": 360, "timestamp": "2025-12-09T22:46:00Z" }, { "point": 7, "time_frame": 360, "timestamp": "2025-12-09T22:40:00Z" }, { "point": 8, "time_frame": 360, "timestamp": "2025-12-09T22:34:00Z" }, { "point": 9, "time_frame": 360, "timestamp": "2025-12-09T22:28:00Z" }, { "point": 10, "time_frame": 360, "timestamp": "2025-12-09T22:22:00Z" }, { "point": 11, "time_frame": 360, "timestamp": "2025-12-09T22:16:00Z" }, { "point": 12, "time_frame": 360, "timestamp": "2025-12-09T22:10:00Z" }, { "point": 13, "time_frame": 360, "timestamp": "2025-12-09T22:04:00Z" }, { "point": 14, "time_frame": 360, "timestamp": "2025-12-09T21:58:00Z" }, { "point": 15, "time_frame": 360, "timestamp": "2025-12-09T21:52:00Z" }, { "point": 16, "time_frame": 360, "timestamp": "2025-12-09T21:46:00Z" }, { "point": 17, "time_frame": 360, "timestamp": "2025-12-09T21:40:00Z" }, { "point": 18, "time_frame": 360, "timestamp": "2025-12-09T21:34:00Z" }, { "point": 19, "time_frame": 360, "timestamp": "2025-12-09T21:28:00Z" }, { "point": 20, "time_frame": 360, "timestamp": "2025-12-09T21:22:00Z" }, { "point": 21, "time_frame": 360, "timestamp": "2025-12-09T21:16:00Z" }, { "point": 22, "time_frame": 360, "timestamp": "2025-12-09T21:10:00Z" }, { "point": 23, "time_frame": 360, "timestamp": "2025-12-09T21:04:00Z" }, { "point": 24, "time_frame": 360, "timestamp": "2025-12-09T20:58:00Z" }, { "point": 25, "time_frame": 360, "timestamp": "2025-12-09T20:52:00Z" }, { "point": 26, "time_frame": 360, "timestamp": "2025-12-09T20:46:00Z" }, { "point": 27, "time_frame": 360, "timestamp": "2025-12-09T20:40:00Z" }, { "point": 28, "time_frame": 360, "timestamp": "2025-12-09T20:34:00Z" }, { "point": 29, 
"time_frame": 360, "timestamp": "2025-12-09T20:28:00Z" }, { "point": 30, "time_frame": 360, "timestamp": "2025-12-09T20:22:00Z" }, { "point": 31, "time_frame": 360, "timestamp": "2025-12-09T20:16:00Z" }, { "point": 32, "time_frame": 360, "timestamp": "2025-12-09T20:10:00Z" }, { "point": 33, "time_frame": 360, "timestamp": "2025-12-09T20:04:00Z" }, { "point": 34, "time_frame": 360, "timestamp": "2025-12-09T19:58:00Z" }, { "point": 35, "time_frame": 360, "timestamp": "2025-12-09T19:52:00Z" }, { "point": 36, "time_frame": 360, "timestamp": "2025-12-09T19:46:00Z" }, { "point": 37, "time_frame": 360, "timestamp": "2025-12-09T19:40:00Z" }, { "point": 38, "time_frame": 360, "timestamp": "2025-12-09T19:34:00Z" }, { "point": 39, "time_frame": 360, "timestamp": "2025-12-09T19:28:00Z" }, { "point": 40, "time_frame": 360, "timestamp": "2025-12-09T19:22:00Z" }, { "point": 41, "time_frame": 360, "timestamp": "2025-12-09T19:16:00Z" }, { "point": 42, "time_frame": 360, "timestamp": "2025-12-09T19:10:00Z" }, { "point": 43, "time_frame": 360, "timestamp": "2025-12-09T19:04:00Z" }, { "point": 44, "time_frame": 360, "timestamp": "2025-12-09T18:58:00Z" }, { "point": 45, "time_frame": 360, "timestamp": "2025-12-09T18:52:00Z" }, { "point": 46, "time_frame": 360, "timestamp": "2025-12-09T18:46:00Z" }, { "point": 47, "time_frame": 360, "timestamp": "2025-12-09T18:40:00Z" }, { "point": 48, "time_frame": 360, "timestamp": "2025-12-09T18:34:00Z" }, { "point": 49, "time_frame": 360, "timestamp": "2025-12-09T18:28:00Z" }, { "point": 50, "time_frame": 360, "timestamp": "2025-12-09T18:22:00Z" }, { "point": 51, "time_frame": 360, "timestamp": "2025-12-09T18:16:00Z" }, { "point": 52, "time_frame": 360, "timestamp": "2025-12-09T18:10:00Z" }, { "point": 53, "time_frame": 360, "timestamp": "2025-12-09T18:04:00Z" }, { "point": 54, "time_frame": 360, "timestamp": "2025-12-09T17:58:00Z" }, { "point": 55, "time_frame": 360, "timestamp": "2025-12-09T17:52:00Z" }, { "point": 56, "time_frame": 360, "timestamp": "2025-12-09T17:46:00Z" }, { "point": 57, "time_frame": 360, "timestamp": "2025-12-09T17:40:00Z" }, { "point": 58, "time_frame": 360, "timestamp": "2025-12-09T17:34:00Z" }, { "point": 59, "time_frame": 360, "timestamp": "2025-12-09T17:28:00Z" }, { "point": 60, "time_frame": 360, "timestamp": "2025-12-09T17:22:00Z" }, { "point": 61, "time_frame": 360, "timestamp": "2025-12-09T17:16:00Z" }, { "point": 62, "time_frame": 360, "timestamp": "2025-12-09T17:10:00Z" }, { "point": 63, "time_frame": 360, "timestamp": "2025-12-09T17:04:00Z" }, { "point": 64, "time_frame": 360, "timestamp": "2025-12-09T16:58:00Z" }, { "point": 65, "time_frame": 360, "timestamp": "2025-12-09T16:52:00Z" }, { "point": 66, "time_frame": 360, "timestamp": "2025-12-09T16:46:00Z" }, { "point": 67, "time_frame": 360, "timestamp": "2025-12-09T16:40:00Z" }, { "point": 68, "time_frame": 360, "timestamp": "2025-12-09T16:34:00Z" }, { "point": 69, "time_frame": 360, "timestamp": "2025-12-09T16:28:00Z" }, { "point": 70, "time_frame": 360, "timestamp": "2025-12-09T16:22:00Z" }, { "point": 71, "time_frame": 360, "timestamp": "2025-12-09T16:16:00Z" }, { "point": 72, "time_frame": 360, "timestamp": "2025-12-09T16:10:00Z" }, { "point": 73, "time_frame": 360, "timestamp": "2025-12-09T16:04:00Z" }, { "point": 74, "time_frame": 360, "timestamp": "2025-12-09T15:58:00Z" }, { "point": 75, "time_frame": 360, "timestamp": "2025-12-09T15:52:00Z" }, { "point": 76, "time_frame": 360, "timestamp": "2025-12-09T15:46:00Z" }, { "point": 77, "time_frame": 360, "timestamp": 
"2025-12-09T15:40:00Z" }, { "point": 78, "time_frame": 360, "timestamp": "2025-12-09T15:34:00Z" }, { "point": 79, "time_frame": 360, "timestamp": "2025-12-09T15:28:00Z" }, { "point": 80, "time_frame": 360, "timestamp": "2025-12-09T15:22:00Z" }, { "point": 81, "time_frame": 360, "timestamp": "2025-12-09T15:16:00Z" }, { "point": 82, "time_frame": 360, "timestamp": "2025-12-09T15:10:00Z" }, { "point": 83, "time_frame": 360, "timestamp": "2025-12-09T15:04:00Z" }, { "point": 84, "time_frame": 360, "timestamp": "2025-12-09T14:58:00Z" }, { "point": 85, "time_frame": 360, "timestamp": "2025-12-09T14:52:00Z" }, { "point": 86, "time_frame": 360, "timestamp": "2025-12-09T14:46:00Z" }, { "point": 87, "time_frame": 360, "timestamp": "2025-12-09T14:40:00Z" }, { "point": 88, "time_frame": 360, "timestamp": "2025-12-09T14:34:00Z" }, { "point": 89, "time_frame": 360, "timestamp": "2025-12-09T14:28:00Z" }, { "point": 90, "time_frame": 360, "timestamp": "2025-12-09T14:22:00Z" }, { "point": 91, "time_frame": 360, "timestamp": "2025-12-09T14:16:00Z" }, { "point": 92, "time_frame": 360, "timestamp": "2025-12-09T14:10:00Z" }, { "point": 93, "time_frame": 360, "timestamp": "2025-12-09T14:04:00Z" }, { "point": 94, "time_frame": 360, "timestamp": "2025-12-09T13:58:00Z" }, { "point": 95, "time_frame": 360, "timestamp": "2025-12-09T13:52:00Z" }, { "point": 96, "time_frame": 360, "timestamp": "2025-12-09T13:46:00Z" }, { "point": 97, "time_frame": 360, "timestamp": "2025-12-09T13:40:00Z" }, { "point": 98, "time_frame": 360, "timestamp": "2025-12-09T13:34:00Z" }, { "point": 99, "time_frame": 360, "timestamp": "2025-12-09T13:28:00Z" }, { "point": 100, "time_frame": 360, "timestamp": "2025-12-09T13:22:00Z" }, { "point": 101, "time_frame": 360, "timestamp": "2025-12-09T13:16:00Z" }, { "point": 102, "time_frame": 360, "timestamp": "2025-12-09T13:10:00Z" }, { "point": 103, "time_frame": 360, "timestamp": "2025-12-09T13:04:00Z" }, { "point": 104, "time_frame": 360, "timestamp": "2025-12-09T12:58:00Z" }, { "point": 105, "time_frame": 360, "timestamp": "2025-12-09T12:52:00Z" }, { "point": 106, "time_frame": 360, "timestamp": "2025-12-09T12:46:00Z" }, { "point": 107, "time_frame": 360, "timestamp": "2025-12-09T12:40:00Z" }, { "point": 108, "time_frame": 360, "timestamp": "2025-12-09T12:34:00Z" }, { "point": 109, "time_frame": 360, "timestamp": "2025-12-09T12:28:00Z" }, { "point": 110, "time_frame": 360, "timestamp": "2025-12-09T12:22:00Z" }, { "point": 111, "time_frame": 360, "timestamp": "2025-12-09T12:16:00Z" }, { "point": 112, "time_frame": 360, "timestamp": "2025-12-09T12:10:00Z" }, { "point": 113, "time_frame": 360, "timestamp": "2025-12-09T12:04:00Z" }, { "point": 114, "time_frame": 360, "timestamp": "2025-12-09T11:58:00Z" }, { "point": 115, "time_frame": 360, "timestamp": "2025-12-09T11:52:00Z" }, { "point": 116, "time_frame": 360, "timestamp": "2025-12-09T11:46:00Z" }, { "point": 117, "time_frame": 360, "timestamp": "2025-12-09T11:40:00Z" }, { "point": 118, "time_frame": 360, "timestamp": "2025-12-09T11:34:00Z" }, { "point": 119, "time_frame": 360, "timestamp": "2025-12-09T11:28:00Z" } ] [ { "time_frame": 360, "timestamp": "2025-12-09T23:22:00Z" }, { "point": 1, "time_frame": 360, "timestamp": "2025-12-09T23:16:00Z" }, { "point": 2, "time_frame": 360, "timestamp": "2025-12-09T23:10:00Z" }, { "point": 3, "time_frame": 360, "timestamp": "2025-12-09T23:04:00Z" }, { "point": 4, "time_frame": 360, "timestamp": "2025-12-09T22:58:00Z" }, { "point": 5, "time_frame": 360, "timestamp": "2025-12-09T22:52:00Z" }, { "point": 6, 
"time_frame": 360, "timestamp": "2025-12-09T22:46:00Z" }, { "point": 7, "time_frame": 360, "timestamp": "2025-12-09T22:40:00Z" }, { "point": 8, "time_frame": 360, "timestamp": "2025-12-09T22:34:00Z" }, { "point": 9, "time_frame": 360, "timestamp": "2025-12-09T22:28:00Z" }, { "point": 10, "time_frame": 360, "timestamp": "2025-12-09T22:22:00Z" }, { "point": 11, "time_frame": 360, "timestamp": "2025-12-09T22:16:00Z" }, { "point": 12, "time_frame": 360, "timestamp": "2025-12-09T22:10:00Z" }, { "point": 13, "time_frame": 360, "timestamp": "2025-12-09T22:04:00Z" }, { "point": 14, "time_frame": 360, "timestamp": "2025-12-09T21:58:00Z" }, { "point": 15, "time_frame": 360, "timestamp": "2025-12-09T21:52:00Z" }, { "point": 16, "time_frame": 360, "timestamp": "2025-12-09T21:46:00Z" }, { "point": 17, "time_frame": 360, "timestamp": "2025-12-09T21:40:00Z" }, { "point": 18, "time_frame": 360, "timestamp": "2025-12-09T21:34:00Z" }, { "point": 19, "time_frame": 360, "timestamp": "2025-12-09T21:28:00Z" }, { "point": 20, "time_frame": 360, "timestamp": "2025-12-09T21:22:00Z" }, { "point": 21, "time_frame": 360, "timestamp": "2025-12-09T21:16:00Z" }, { "point": 22, "time_frame": 360, "timestamp": "2025-12-09T21:10:00Z" }, { "point": 23, "time_frame": 360, "timestamp": "2025-12-09T21:04:00Z" }, { "point": 24, "time_frame": 360, "timestamp": "2025-12-09T20:58:00Z" }, { "point": 25, "time_frame": 360, "timestamp": "2025-12-09T20:52:00Z" }, { "point": 26, "time_frame": 360, "timestamp": "2025-12-09T20:46:00Z" }, { "point": 27, "time_frame": 360, "timestamp": "2025-12-09T20:40:00Z" }, { "point": 28, "time_frame": 360, "timestamp": "2025-12-09T20:34:00Z" }, { "point": 29, "time_frame": 360, "timestamp": "2025-12-09T20:28:00Z" }, { "point": 30, "time_frame": 360, "timestamp": "2025-12-09T20:22:00Z" }, { "point": 31, "time_frame": 360, "timestamp": "2025-12-09T20:16:00Z" }, { "point": 32, "time_frame": 360, "timestamp": "2025-12-09T20:10:00Z" }, { "point": 33, "time_frame": 360, "timestamp": "2025-12-09T20:04:00Z" }, { "point": 34, "time_frame": 360, "timestamp": "2025-12-09T19:58:00Z" }, { "point": 35, "time_frame": 360, "timestamp": "2025-12-09T19:52:00Z" }, { "point": 36, "time_frame": 360, "timestamp": "2025-12-09T19:46:00Z" }, { "point": 37, "time_frame": 360, "timestamp": "2025-12-09T19:40:00Z" }, { "point": 38, "time_frame": 360, "timestamp": "2025-12-09T19:34:00Z" }, { "point": 39, "time_frame": 360, "timestamp": "2025-12-09T19:28:00Z" }, { "point": 40, "time_frame": 360, "timestamp": "2025-12-09T19:22:00Z" }, { "point": 41, "time_frame": 360, "timestamp": "2025-12-09T19:16:00Z" }, { "point": 42, "time_frame": 360, "timestamp": "2025-12-09T19:10:00Z" }, { "point": 43, "time_frame": 360, "timestamp": "2025-12-09T19:04:00Z" }, { "point": 44, "time_frame": 360, "timestamp": "2025-12-09T18:58:00Z" }, { "point": 45, "time_frame": 360, "timestamp": "2025-12-09T18:52:00Z" }, { "point": 46, "time_frame": 360, "timestamp": "2025-12-09T18:46:00Z" }, { "point": 47, "time_frame": 360, "timestamp": "2025-12-09T18:40:00Z" }, { "point": 48, "time_frame": 360, "timestamp": "2025-12-09T18:34:00Z" }, { "point": 49, "time_frame": 360, "timestamp": "2025-12-09T18:28:00Z" }, { "point": 50, "time_frame": 360, "timestamp": "2025-12-09T18:22:00Z" }, { "point": 51, "time_frame": 360, "timestamp": "2025-12-09T18:16:00Z" }, { "point": 52, "time_frame": 360, "timestamp": "2025-12-09T18:10:00Z" }, { "point": 53, "time_frame": 360, "timestamp": "2025-12-09T18:04:00Z" }, { "point": 54, "time_frame": 360, "timestamp": 
"2025-12-09T17:58:00Z" }, { "point": 55, "time_frame": 360, "timestamp": "2025-12-09T17:52:00Z" }, { "point": 56, "time_frame": 360, "timestamp": "2025-12-09T17:46:00Z" }, { "point": 57, "time_frame": 360, "timestamp": "2025-12-09T17:40:00Z" }, { "point": 58, "time_frame": 360, "timestamp": "2025-12-09T17:34:00Z" }, { "point": 59, "time_frame": 360, "timestamp": "2025-12-09T17:28:00Z" }, { "point": 60, "time_frame": 360, "timestamp": "2025-12-09T17:22:00Z" }, { "point": 61, "time_frame": 360, "timestamp": "2025-12-09T17:16:00Z" }, { "point": 62, "time_frame": 360, "timestamp": "2025-12-09T17:10:00Z" }, { "point": 63, "time_frame": 360, "timestamp": "2025-12-09T17:04:00Z" }, { "point": 64, "time_frame": 360, "timestamp": "2025-12-09T16:58:00Z" }, { "point": 65, "time_frame": 360, "timestamp": "2025-12-09T16:52:00Z" }, { "point": 66, "time_frame": 360, "timestamp": "2025-12-09T16:46:00Z" }, { "point": 67, "time_frame": 360, "timestamp": "2025-12-09T16:40:00Z" }, { "point": 68, "time_frame": 360, "timestamp": "2025-12-09T16:34:00Z" }, { "point": 69, "time_frame": 360, "timestamp": "2025-12-09T16:28:00Z" }, { "point": 70, "time_frame": 360, "timestamp": "2025-12-09T16:22:00Z" }, { "point": 71, "time_frame": 360, "timestamp": "2025-12-09T16:16:00Z" }, { "point": 72, "time_frame": 360, "timestamp": "2025-12-09T16:10:00Z" }, { "point": 73, "time_frame": 360, "timestamp": "2025-12-09T16:04:00Z" }, { "point": 74, "time_frame": 360, "timestamp": "2025-12-09T15:58:00Z" }, { "point": 75, "time_frame": 360, "timestamp": "2025-12-09T15:52:00Z" }, { "point": 76, "time_frame": 360, "timestamp": "2025-12-09T15:46:00Z" }, { "point": 77, "time_frame": 360, "timestamp": "2025-12-09T15:40:00Z" }, { "point": 78, "time_frame": 360, "timestamp": "2025-12-09T15:34:00Z" }, { "point": 79, "time_frame": 360, "timestamp": "2025-12-09T15:28:00Z" }, { "point": 80, "time_frame": 360, "timestamp": "2025-12-09T15:22:00Z" }, { "point": 81, "time_frame": 360, "timestamp": "2025-12-09T15:16:00Z" }, { "point": 82, "time_frame": 360, "timestamp": "2025-12-09T15:10:00Z" }, { "point": 83, "time_frame": 360, "timestamp": "2025-12-09T15:04:00Z" }, { "point": 84, "time_frame": 360, "timestamp": "2025-12-09T14:58:00Z" }, { "point": 85, "time_frame": 360, "timestamp": "2025-12-09T14:52:00Z" }, { "point": 86, "time_frame": 360, "timestamp": "2025-12-09T14:46:00Z" }, { "point": 87, "time_frame": 360, "timestamp": "2025-12-09T14:40:00Z" }, { "point": 88, "time_frame": 360, "timestamp": "2025-12-09T14:34:00Z" }, { "point": 89, "time_frame": 360, "timestamp": "2025-12-09T14:28:00Z" }, { "point": 90, "time_frame": 360, "timestamp": "2025-12-09T14:22:00Z" }, { "point": 91, "time_frame": 360, "timestamp": "2025-12-09T14:16:00Z" }, { "point": 92, "time_frame": 360, "timestamp": "2025-12-09T14:10:00Z" }, { "point": 93, "time_frame": 360, "timestamp": "2025-12-09T14:04:00Z" }, { "point": 94, "time_frame": 360, "timestamp": "2025-12-09T13:58:00Z" }, { "point": 95, "time_frame": 360, "timestamp": "2025-12-09T13:52:00Z" }, { "point": 96, "time_frame": 360, "timestamp": "2025-12-09T13:46:00Z" }, { "point": 97, "time_frame": 360, "timestamp": "2025-12-09T13:40:00Z" }, { "point": 98, "time_frame": 360, "timestamp": "2025-12-09T13:34:00Z" }, { "point": 99, "time_frame": 360, "timestamp": "2025-12-09T13:28:00Z" }, { "point": 100, "time_frame": 360, "timestamp": "2025-12-09T13:22:00Z" }, { "point": 101, "time_frame": 360, "timestamp": "2025-12-09T13:16:00Z" }, { "point": 102, "time_frame": 360, "timestamp": "2025-12-09T13:10:00Z" }, { "point": 103, 
"time_frame": 360, "timestamp": "2025-12-09T13:04:00Z" }, { "point": 104, "time_frame": 360, "timestamp": "2025-12-09T12:58:00Z" }, { "point": 105, "time_frame": 360, "timestamp": "2025-12-09T12:52:00Z" }, { "point": 106, "time_frame": 360, "timestamp": "2025-12-09T12:46:00Z" }, { "point": 107, "time_frame": 360, "timestamp": "2025-12-09T12:40:00Z" }, { "point": 108, "time_frame": 360, "timestamp": "2025-12-09T12:34:00Z" }, { "point": 109, "time_frame": 360, "timestamp": "2025-12-09T12:28:00Z" }, { "point": 110, "time_frame": 360, "timestamp": "2025-12-09T12:22:00Z" }, { "point": 111, "time_frame": 360, "timestamp": "2025-12-09T12:16:00Z" }, { "point": 112, "time_frame": 360, "timestamp": "2025-12-09T12:10:00Z" }, { "point": 113, "time_frame": 360, "timestamp": "2025-12-09T12:04:00Z" }, { "point": 114, "time_frame": 360, "timestamp": "2025-12-09T11:58:00Z" }, { "point": 115, "time_frame": 360, "timestamp": "2025-12-09T11:52:00Z" }, { "point": 116, "time_frame": 360, "timestamp": "2025-12-09T11:46:00Z" }, { "point": 117, "time_frame": 360, "timestamp": "2025-12-09T11:40:00Z" }, { "point": 118, "time_frame": 360, "timestamp": "2025-12-09T11:34:00Z" }, { "point": 119, "time_frame": 360, "timestamp": "2025-12-09T11:28:00Z" } ] [ { "time_frame": 360, "timestamp": "2025-12-09T23:22:00Z" }, { "point": 1, "time_frame": 360, "timestamp": "2025-12-09T23:16:00Z" }, { "point": 2, "time_frame": 360, "timestamp": "2025-12-09T23:10:00Z" }, { "point": 3, "time_frame": 360, "timestamp": "2025-12-09T23:04:00Z" }, { "point": 4, "time_frame": 360, "timestamp": "2025-12-09T22:58:00Z" }, { "point": 5, "time_frame": 360, "timestamp": "2025-12-09T22:52:00Z" }, { "point": 6, "time_frame": 360, "timestamp": "2025-12-09T22:46:00Z" }, { "point": 7, "time_frame": 360, "timestamp": "2025-12-09T22:40:00Z" }, { "point": 8, "time_frame": 360, "timestamp": "2025-12-09T22:34:00Z" }, { "point": 9, "time_frame": 360, "timestamp": "2025-12-09T22:28:00Z" }, { "point": 10, "time_frame": 360, "timestamp": "2025-12-09T22:22:00Z" }, { "point": 11, "time_frame": 360, "timestamp": "2025-12-09T22:16:00Z" }, { "point": 12, "time_frame": 360, "timestamp": "2025-12-09T22:10:00Z" }, { "point": 13, "time_frame": 360, "timestamp": "2025-12-09T22:04:00Z" }, { "point": 14, "time_frame": 360, "timestamp": "2025-12-09T21:58:00Z" }, { "point": 15, "time_frame": 360, "timestamp": "2025-12-09T21:52:00Z" }, { "point": 16, "time_frame": 360, "timestamp": "2025-12-09T21:46:00Z" }, { "point": 17, "time_frame": 360, "timestamp": "2025-12-09T21:40:00Z" }, { "point": 18, "time_frame": 360, "timestamp": "2025-12-09T21:34:00Z" }, { "point": 19, "time_frame": 360, "timestamp": "2025-12-09T21:28:00Z" }, { "point": 20, "time_frame": 360, "timestamp": "2025-12-09T21:22:00Z" }, { "point": 21, "time_frame": 360, "timestamp": "2025-12-09T21:16:00Z" }, { "point": 22, "time_frame": 360, "timestamp": "2025-12-09T21:10:00Z" }, { "point": 23, "time_frame": 360, "timestamp": "2025-12-09T21:04:00Z" }, { "point": 24, "time_frame": 360, "timestamp": "2025-12-09T20:58:00Z" }, { "point": 25, "time_frame": 360, "timestamp": "2025-12-09T20:52:00Z" }, { "point": 26, "time_frame": 360, "timestamp": "2025-12-09T20:46:00Z" }, { "point": 27, "time_frame": 360, "timestamp": "2025-12-09T20:40:00Z" }, { "point": 28, "time_frame": 360, "timestamp": "2025-12-09T20:34:00Z" }, { "point": 29, "time_frame": 360, "timestamp": "2025-12-09T20:28:00Z" }, { "point": 30, "time_frame": 360, "timestamp": "2025-12-09T20:22:00Z" }, { "point": 31, "time_frame": 360, "timestamp": 
"2025-12-09T20:16:00Z" }, { "point": 32, "time_frame": 360, "timestamp": "2025-12-09T20:10:00Z" }, { "point": 33, "time_frame": 360, "timestamp": "2025-12-09T20:04:00Z" }, { "point": 34, "time_frame": 360, "timestamp": "2025-12-09T19:58:00Z" }, { "point": 35, "time_frame": 360, "timestamp": "2025-12-09T19:52:00Z" }, { "point": 36, "time_frame": 360, "timestamp": "2025-12-09T19:46:00Z" }, { "point": 37, "time_frame": 360, "timestamp": "2025-12-09T19:40:00Z" }, { "point": 38, "time_frame": 360, "timestamp": "2025-12-09T19:34:00Z" }, { "point": 39, "time_frame": 360, "timestamp": "2025-12-09T19:28:00Z" }, { "point": 40, "time_frame": 360, "timestamp": "2025-12-09T19:22:00Z" }, { "point": 41, "time_frame": 360, "timestamp": "2025-12-09T19:16:00Z" }, { "point": 42, "time_frame": 360, "timestamp": "2025-12-09T19:10:00Z" }, { "point": 43, "time_frame": 360, "timestamp": "2025-12-09T19:04:00Z" }, { "point": 44, "time_frame": 360, "timestamp": "2025-12-09T18:58:00Z" }, { "point": 45, "time_frame": 360, "timestamp": "2025-12-09T18:52:00Z" }, { "point": 46, "time_frame": 360, "timestamp": "2025-12-09T18:46:00Z" }, { "point": 47, "time_frame": 360, "timestamp": "2025-12-09T18:40:00Z" }, { "point": 48, "time_frame": 360, "timestamp": "2025-12-09T18:34:00Z" }, { "point": 49, "time_frame": 360, "timestamp": "2025-12-09T18:28:00Z" }, { "point": 50, "time_frame": 360, "timestamp": "2025-12-09T18:22:00Z" }, { "point": 51, "time_frame": 360, "timestamp": "2025-12-09T18:16:00Z" }, { "point": 52, "time_frame": 360, "timestamp": "2025-12-09T18:10:00Z" }, { "point": 53, "time_frame": 360, "timestamp": "2025-12-09T18:04:00Z" }, { "point": 54, "time_frame": 360, "timestamp": "2025-12-09T17:58:00Z" }, { "point": 55, "time_frame": 360, "timestamp": "2025-12-09T17:52:00Z" }, { "point": 56, "time_frame": 360, "timestamp": "2025-12-09T17:46:00Z" }, { "point": 57, "time_frame": 360, "timestamp": "2025-12-09T17:40:00Z" }, { "point": 58, "time_frame": 360, "timestamp": "2025-12-09T17:34:00Z" }, { "point": 59, "time_frame": 360, "timestamp": "2025-12-09T17:28:00Z" }, { "point": 60, "time_frame": 360, "timestamp": "2025-12-09T17:22:00Z" }, { "point": 61, "time_frame": 360, "timestamp": "2025-12-09T17:16:00Z" }, { "point": 62, "time_frame": 360, "timestamp": "2025-12-09T17:10:00Z" }, { "point": 63, "time_frame": 360, "timestamp": "2025-12-09T17:04:00Z" }, { "point": 64, "time_frame": 360, "timestamp": "2025-12-09T16:58:00Z" }, { "point": 65, "time_frame": 360, "timestamp": "2025-12-09T16:52:00Z" }, { "point": 66, "time_frame": 360, "timestamp": "2025-12-09T16:46:00Z" }, { "point": 67, "time_frame": 360, "timestamp": "2025-12-09T16:40:00Z" }, { "point": 68, "time_frame": 360, "timestamp": "2025-12-09T16:34:00Z" }, { "point": 69, "time_frame": 360, "timestamp": "2025-12-09T16:28:00Z" }, { "point": 70, "time_frame": 360, "timestamp": "2025-12-09T16:22:00Z" }, { "point": 71, "time_frame": 360, "timestamp": "2025-12-09T16:16:00Z" }, { "point": 72, "time_frame": 360, "timestamp": "2025-12-09T16:10:00Z" }, { "point": 73, "time_frame": 360, "timestamp": "2025-12-09T16:04:00Z" }, { "point": 74, "time_frame": 360, "timestamp": "2025-12-09T15:58:00Z" }, { "point": 75, "time_frame": 360, "timestamp": "2025-12-09T15:52:00Z" }, { "point": 76, "time_frame": 360, "timestamp": "2025-12-09T15:46:00Z" }, { "point": 77, "time_frame": 360, "timestamp": "2025-12-09T15:40:00Z" }, { "point": 78, "time_frame": 360, "timestamp": "2025-12-09T15:34:00Z" }, { "point": 79, "time_frame": 360, "timestamp": "2025-12-09T15:28:00Z" }, { "point": 80, 
"time_frame": 360, "timestamp": "2025-12-09T15:22:00Z" }, { "point": 81, "time_frame": 360, "timestamp": "2025-12-09T15:16:00Z" }, { "point": 82, "time_frame": 360, "timestamp": "2025-12-09T15:10:00Z" }, { "point": 83, "time_frame": 360, "timestamp": "2025-12-09T15:04:00Z" }, { "point": 84, "time_frame": 360, "timestamp": "2025-12-09T14:58:00Z" }, { "point": 85, "time_frame": 360, "timestamp": "2025-12-09T14:52:00Z" }, { "point": 86, "time_frame": 360, "timestamp": "2025-12-09T14:46:00Z" }, { "point": 87, "time_frame": 360, "timestamp": "2025-12-09T14:40:00Z" }, { "point": 88, "time_frame": 360, "timestamp": "2025-12-09T14:34:00Z" }, { "point": 89, "time_frame": 360, "timestamp": "2025-12-09T14:28:00Z" }, { "point": 90, "time_frame": 360, "timestamp": "2025-12-09T14:22:00Z" }, { "point": 91, "time_frame": 360, "timestamp": "2025-12-09T14:16:00Z" }, { "point": 92, "time_frame": 360, "timestamp": "2025-12-09T14:10:00Z" }, { "point": 93, "time_frame": 360, "timestamp": "2025-12-09T14:04:00Z" }, { "point": 94, "time_frame": 360, "timestamp": "2025-12-09T13:58:00Z" }, { "point": 95, "time_frame": 360, "timestamp": "2025-12-09T13:52:00Z" }, { "point": 96, "time_frame": 360, "timestamp": "2025-12-09T13:46:00Z" }, { "point": 97, "time_frame": 360, "timestamp": "2025-12-09T13:40:00Z" }, { "point": 98, "time_frame": 360, "timestamp": "2025-12-09T13:34:00Z" }, { "point": 99, "time_frame": 360, "timestamp": "2025-12-09T13:28:00Z" }, { "point": 100, "time_frame": 360, "timestamp": "2025-12-09T13:22:00Z" }, { "point": 101, "time_frame": 360, "timestamp": "2025-12-09T13:16:00Z" }, { "point": 102, "time_frame": 360, "timestamp": "2025-12-09T13:10:00Z" }, { "point": 103, "time_frame": 360, "timestamp": "2025-12-09T13:04:00Z" }, { "point": 104, "time_frame": 360, "timestamp": "2025-12-09T12:58:00Z" }, { "point": 105, "time_frame": 360, "timestamp": "2025-12-09T12:52:00Z" }, { "point": 106, "time_frame": 360, "timestamp": "2025-12-09T12:46:00Z" }, { "point": 107, "time_frame": 360, "timestamp": "2025-12-09T12:40:00Z" }, { "point": 108, "time_frame": 360, "timestamp": "2025-12-09T12:34:00Z" }, { "point": 109, "time_frame": 360, "timestamp": "2025-12-09T12:28:00Z" }, { "point": 110, "time_frame": 360, "timestamp": "2025-12-09T12:22:00Z" }, { "point": 111, "time_frame": 360, "timestamp": "2025-12-09T12:16:00Z" }, { "point": 112, "time_frame": 360, "timestamp": "2025-12-09T12:10:00Z" }, { "point": 113, "time_frame": 360, "timestamp": "2025-12-09T12:04:00Z" }, { "point": 114, "time_frame": 360, "timestamp": "2025-12-09T11:58:00Z" }, { "point": 115, "time_frame": 360, "timestamp": "2025-12-09T11:52:00Z" }, { "point": 116, "time_frame": 360, "timestamp": "2025-12-09T11:46:00Z" }, { "point": 117, "time_frame": 360, "timestamp": "2025-12-09T11:40:00Z" }, { "point": 118, "time_frame": 360, "timestamp": "2025-12-09T11:34:00Z" }, { "point": 119, "time_frame": 360, "timestamp": "2025-12-09T11:28:00Z" } ] != \n\u\l\l ]] + rm -f payload.json + get_qan_values mongodb dev-mongos admin:admin + local service_type=mongodb + local environment=dev-mongos + local user_pass=admin:admin + local start + local end + local endpoint ++ /usr/sbin/date -u -d '-12 hour' +%Y-%m-%dT%H:%M:%S%:z + start=2025-12-09T11:23:01+00:00 ++ /usr/sbin/date -u +%Y-%m-%dT%H:%M:%S%:z + end=2025-12-09T23:23:01+00:00 ++ get_service_endpoint monitoring-service ++ local service=monitoring-service +++ kubectl_bin get service/monitoring-service -o json +++ jq '.status.loadBalancer.ingress[].hostname' +++ sed -e 's/^"//; s/"$//;' ++++ mktemp +++ local 
LAST_OUT=/tmp/tmp.IBi81MefY8 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.attkk5enak +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.IBi81MefY8 +++ cat /tmp/tmp.attkk5enak +++ rm /tmp/tmp.IBi81MefY8 /tmp/tmp.attkk5enak +++ return 0 ++ local hostname=null ++ '[' -n null -a null '!=' null ']' +++ kubectl_bin get service/monitoring-service -o json +++ jq '.status.loadBalancer.ingress[].ip' +++ sed -e 's/^"//; s/"$//;' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.oGIVGUxldZ ++++ mktemp +++ local LAST_ERR=/tmp/tmp.pWZVBPrDRu +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.oGIVGUxldZ +++ cat /tmp/tmp.pWZVBPrDRu +++ rm /tmp/tmp.oGIVGUxldZ /tmp/tmp.pWZVBPrDRu +++ return 0 ++ local ip=35.202.20.27 ++ '[' -n 35.202.20.27 -a 35.202.20.27 '!=' null ']' ++ echo 35.202.20.27 ++ return + endpoint=35.202.20.27 + cat + local response + retry=0 ++ curl -s -k -XPOST -d @payload.json https://admin:admin@35.202.20.27/v0/qan/GetReport ++ jq '.rows[].sparkline' + [[ [ { "time_frame": 360, "timestamp": "2025-12-09T23:23:00Z" }, { "point": 1, "time_frame": 360, "timestamp": "2025-12-09T23:17:00Z" }, { "point": 2, "time_frame": 360, "timestamp": "2025-12-09T23:11:00Z" }, { "point": 3, "time_frame": 360, "timestamp": "2025-12-09T23:05:00Z" }, { "point": 4, "time_frame": 360, "timestamp": "2025-12-09T22:59:00Z" }, { "point": 5, "time_frame": 360, "timestamp": "2025-12-09T22:53:00Z" }, { "point": 6, "time_frame": 360, "timestamp": "2025-12-09T22:47:00Z" }, { "point": 7, "time_frame": 360, "timestamp": "2025-12-09T22:41:00Z" }, { "point": 8, "time_frame": 360, "timestamp": "2025-12-09T22:35:00Z" }, { "point": 9, "time_frame": 360, "timestamp": "2025-12-09T22:29:00Z" }, { "point": 10, "time_frame": 360, "timestamp": "2025-12-09T22:23:00Z" }, { "point": 11, "time_frame": 360, "timestamp": "2025-12-09T22:17:00Z" }, { "point": 12, "time_frame": 360, "timestamp": "2025-12-09T22:11:00Z" }, { "point": 13, "time_frame": 360, "timestamp": "2025-12-09T22:05:00Z" }, { "point": 14, "time_frame": 360, "timestamp": "2025-12-09T21:59:00Z" }, { "point": 15, "time_frame": 360, "timestamp": "2025-12-09T21:53:00Z" }, { "point": 16, "time_frame": 360, "timestamp": "2025-12-09T21:47:00Z" }, { "point": 17, "time_frame": 360, "timestamp": "2025-12-09T21:41:00Z" }, { "point": 18, "time_frame": 360, "timestamp": "2025-12-09T21:35:00Z" }, { "point": 19, "time_frame": 360, "timestamp": "2025-12-09T21:29:00Z" }, { "point": 20, "time_frame": 360, "timestamp": "2025-12-09T21:23:00Z" }, { "point": 21, "time_frame": 360, "timestamp": "2025-12-09T21:17:00Z" }, { "point": 22, "time_frame": 360, "timestamp": "2025-12-09T21:11:00Z" }, { "point": 23, "time_frame": 360, "timestamp": "2025-12-09T21:05:00Z" }, { "point": 24, "time_frame": 360, "timestamp": "2025-12-09T20:59:00Z" }, { "point": 25, "time_frame": 360, "timestamp": "2025-12-09T20:53:00Z" }, { "point": 26, "time_frame": 360, "timestamp": "2025-12-09T20:47:00Z" }, { "point": 27, "time_frame": 360, "timestamp": "2025-12-09T20:41:00Z" }, { "point": 28, "time_frame": 360, "timestamp": "2025-12-09T20:35:00Z" }, { "point": 29, "time_frame": 360, "timestamp": "2025-12-09T20:29:00Z" }, { "point": 30, "time_frame": 360, "timestamp": "2025-12-09T20:23:00Z" }, { 
"point": 31, "time_frame": 360, "timestamp": "2025-12-09T20:17:00Z" }, { "point": 32, "time_frame": 360, "timestamp": "2025-12-09T20:11:00Z" }, { "point": 33, "time_frame": 360, "timestamp": "2025-12-09T20:05:00Z" }, { "point": 34, "time_frame": 360, "timestamp": "2025-12-09T19:59:00Z" }, { "point": 35, "time_frame": 360, "timestamp": "2025-12-09T19:53:00Z" }, { "point": 36, "time_frame": 360, "timestamp": "2025-12-09T19:47:00Z" }, { "point": 37, "time_frame": 360, "timestamp": "2025-12-09T19:41:00Z" }, { "point": 38, "time_frame": 360, "timestamp": "2025-12-09T19:35:00Z" }, { "point": 39, "time_frame": 360, "timestamp": "2025-12-09T19:29:00Z" }, { "point": 40, "time_frame": 360, "timestamp": "2025-12-09T19:23:00Z" }, { "point": 41, "time_frame": 360, "timestamp": "2025-12-09T19:17:00Z" }, { "point": 42, "time_frame": 360, "timestamp": "2025-12-09T19:11:00Z" }, { "point": 43, "time_frame": 360, "timestamp": "2025-12-09T19:05:00Z" }, { "point": 44, "time_frame": 360, "timestamp": "2025-12-09T18:59:00Z" }, { "point": 45, "time_frame": 360, "timestamp": "2025-12-09T18:53:00Z" }, { "point": 46, "time_frame": 360, "timestamp": "2025-12-09T18:47:00Z" }, { "point": 47, "time_frame": 360, "timestamp": "2025-12-09T18:41:00Z" }, { "point": 48, "time_frame": 360, "timestamp": "2025-12-09T18:35:00Z" }, { "point": 49, "time_frame": 360, "timestamp": "2025-12-09T18:29:00Z" }, { "point": 50, "time_frame": 360, "timestamp": "2025-12-09T18:23:00Z" }, { "point": 51, "time_frame": 360, "timestamp": "2025-12-09T18:17:00Z" }, { "point": 52, "time_frame": 360, "timestamp": "2025-12-09T18:11:00Z" }, { "point": 53, "time_frame": 360, "timestamp": "2025-12-09T18:05:00Z" }, { "point": 54, "time_frame": 360, "timestamp": "2025-12-09T17:59:00Z" }, { "point": 55, "time_frame": 360, "timestamp": "2025-12-09T17:53:00Z" }, { "point": 56, "time_frame": 360, "timestamp": "2025-12-09T17:47:00Z" }, { "point": 57, "time_frame": 360, "timestamp": "2025-12-09T17:41:00Z" }, { "point": 58, "time_frame": 360, "timestamp": "2025-12-09T17:35:00Z" }, { "point": 59, "time_frame": 360, "timestamp": "2025-12-09T17:29:00Z" }, { "point": 60, "time_frame": 360, "timestamp": "2025-12-09T17:23:00Z" }, { "point": 61, "time_frame": 360, "timestamp": "2025-12-09T17:17:00Z" }, { "point": 62, "time_frame": 360, "timestamp": "2025-12-09T17:11:00Z" }, { "point": 63, "time_frame": 360, "timestamp": "2025-12-09T17:05:00Z" }, { "point": 64, "time_frame": 360, "timestamp": "2025-12-09T16:59:00Z" }, { "point": 65, "time_frame": 360, "timestamp": "2025-12-09T16:53:00Z" }, { "point": 66, "time_frame": 360, "timestamp": "2025-12-09T16:47:00Z" }, { "point": 67, "time_frame": 360, "timestamp": "2025-12-09T16:41:00Z" }, { "point": 68, "time_frame": 360, "timestamp": "2025-12-09T16:35:00Z" }, { "point": 69, "time_frame": 360, "timestamp": "2025-12-09T16:29:00Z" }, { "point": 70, "time_frame": 360, "timestamp": "2025-12-09T16:23:00Z" }, { "point": 71, "time_frame": 360, "timestamp": "2025-12-09T16:17:00Z" }, { "point": 72, "time_frame": 360, "timestamp": "2025-12-09T16:11:00Z" }, { "point": 73, "time_frame": 360, "timestamp": "2025-12-09T16:05:00Z" }, { "point": 74, "time_frame": 360, "timestamp": "2025-12-09T15:59:00Z" }, { "point": 75, "time_frame": 360, "timestamp": "2025-12-09T15:53:00Z" }, { "point": 76, "time_frame": 360, "timestamp": "2025-12-09T15:47:00Z" }, { "point": 77, "time_frame": 360, "timestamp": "2025-12-09T15:41:00Z" }, { "point": 78, "time_frame": 360, "timestamp": "2025-12-09T15:35:00Z" }, { "point": 79, "time_frame": 360, "timestamp": 
"2025-12-09T15:29:00Z" }, { "point": 80, "time_frame": 360, "timestamp": "2025-12-09T15:23:00Z" }, { "point": 81, "time_frame": 360, "timestamp": "2025-12-09T15:17:00Z" }, { "point": 82, "time_frame": 360, "timestamp": "2025-12-09T15:11:00Z" }, { "point": 83, "time_frame": 360, "timestamp": "2025-12-09T15:05:00Z" }, { "point": 84, "time_frame": 360, "timestamp": "2025-12-09T14:59:00Z" }, { "point": 85, "time_frame": 360, "timestamp": "2025-12-09T14:53:00Z" }, { "point": 86, "time_frame": 360, "timestamp": "2025-12-09T14:47:00Z" }, { "point": 87, "time_frame": 360, "timestamp": "2025-12-09T14:41:00Z" }, { "point": 88, "time_frame": 360, "timestamp": "2025-12-09T14:35:00Z" }, { "point": 89, "time_frame": 360, "timestamp": "2025-12-09T14:29:00Z" }, { "point": 90, "time_frame": 360, "timestamp": "2025-12-09T14:23:00Z" }, { "point": 91, "time_frame": 360, "timestamp": "2025-12-09T14:17:00Z" }, { "point": 92, "time_frame": 360, "timestamp": "2025-12-09T14:11:00Z" }, { "point": 93, "time_frame": 360, "timestamp": "2025-12-09T14:05:00Z" }, { "point": 94, "time_frame": 360, "timestamp": "2025-12-09T13:59:00Z" }, { "point": 95, "time_frame": 360, "timestamp": "2025-12-09T13:53:00Z" }, { "point": 96, "time_frame": 360, "timestamp": "2025-12-09T13:47:00Z" }, { "point": 97, "time_frame": 360, "timestamp": "2025-12-09T13:41:00Z" }, { "point": 98, "time_frame": 360, "timestamp": "2025-12-09T13:35:00Z" }, { "point": 99, "time_frame": 360, "timestamp": "2025-12-09T13:29:00Z" }, { "point": 100, "time_frame": 360, "timestamp": "2025-12-09T13:23:00Z" }, { "point": 101, "time_frame": 360, "timestamp": "2025-12-09T13:17:00Z" }, { "point": 102, "time_frame": 360, "timestamp": "2025-12-09T13:11:00Z" }, { "point": 103, "time_frame": 360, "timestamp": "2025-12-09T13:05:00Z" }, { "point": 104, "time_frame": 360, "timestamp": "2025-12-09T12:59:00Z" }, { "point": 105, "time_frame": 360, "timestamp": "2025-12-09T12:53:00Z" }, { "point": 106, "time_frame": 360, "timestamp": "2025-12-09T12:47:00Z" }, { "point": 107, "time_frame": 360, "timestamp": "2025-12-09T12:41:00Z" }, { "point": 108, "time_frame": 360, "timestamp": "2025-12-09T12:35:00Z" }, { "point": 109, "time_frame": 360, "timestamp": "2025-12-09T12:29:00Z" }, { "point": 110, "time_frame": 360, "timestamp": "2025-12-09T12:23:00Z" }, { "point": 111, "time_frame": 360, "timestamp": "2025-12-09T12:17:00Z" }, { "point": 112, "time_frame": 360, "timestamp": "2025-12-09T12:11:00Z" }, { "point": 113, "time_frame": 360, "timestamp": "2025-12-09T12:05:00Z" }, { "point": 114, "time_frame": 360, "timestamp": "2025-12-09T11:59:00Z" }, { "point": 115, "time_frame": 360, "timestamp": "2025-12-09T11:53:00Z" }, { "point": 116, "time_frame": 360, "timestamp": "2025-12-09T11:47:00Z" }, { "point": 117, "time_frame": 360, "timestamp": "2025-12-09T11:41:00Z" }, { "point": 118, "time_frame": 360, "timestamp": "2025-12-09T11:35:00Z" }, { "point": 119, "time_frame": 360, "timestamp": "2025-12-09T11:29:00Z" } ] != \n\u\l\l ]] + rm -f payload.json + nodeList=($(get_node_id_from_pmm)) ++ get_node_id_from_pmm ++ nodeList=() ++ local -a nodeList +++ kubectl_bin get pods --no-headers -l app.kubernetes.io/name=percona-server-mongodb --output=custom-columns=NAME:.metadata.name ++++ mktemp +++ local LAST_OUT=/tmp/tmp.4cDATps5Wa ++++ mktemp +++ local LAST_ERR=/tmp/tmp.SfbOdprJQv +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get pods --no-headers -l app.kubernetes.io/name=percona-server-mongodb --output=custom-columns=NAME:.metadata.name 
+++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.4cDATps5Wa +++ cat /tmp/tmp.SfbOdprJQv +++ rm /tmp/tmp.4cDATps5Wa /tmp/tmp.SfbOdprJQv +++ return 0 ++ for instance in $(kubectl_bin get pods --no-headers -l app.kubernetes.io/name=percona-server-mongodb --output=custom-columns='NAME:.metadata.name') ++ nodeList+=($(kubectl_bin exec -n "$namespace" $instance -c pmm-client -- pmm-admin status --json | jq -r '.pmm_agent_status.node_id')) +++ kubectl_bin exec -n monitoring-2-0-3412 monitoring-cfg-0 -c pmm-client -- pmm-admin status --json +++ jq -r .pmm_agent_status.node_id ++++ mktemp +++ local LAST_OUT=/tmp/tmp.JGA63Ivypf ++++ mktemp +++ local LAST_ERR=/tmp/tmp.NzM1oWCHS2 +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec -n monitoring-2-0-3412 monitoring-cfg-0 -c pmm-client -- pmm-admin status --json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.JGA63Ivypf +++ cat /tmp/tmp.NzM1oWCHS2 +++ rm /tmp/tmp.JGA63Ivypf /tmp/tmp.NzM1oWCHS2 +++ return 0 ++ for instance in $(kubectl_bin get pods --no-headers -l app.kubernetes.io/name=percona-server-mongodb --output=custom-columns='NAME:.metadata.name') ++ nodeList+=($(kubectl_bin exec -n "$namespace" $instance -c pmm-client -- pmm-admin status --json | jq -r '.pmm_agent_status.node_id')) +++ kubectl_bin exec -n monitoring-2-0-3412 monitoring-cfg-1 -c pmm-client -- pmm-admin status --json +++ jq -r .pmm_agent_status.node_id ++++ mktemp +++ local LAST_OUT=/tmp/tmp.fMdE6GcQs6 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.GpmopaG1fU +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec -n monitoring-2-0-3412 monitoring-cfg-1 -c pmm-client -- pmm-admin status --json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.fMdE6GcQs6 +++ cat /tmp/tmp.GpmopaG1fU +++ rm /tmp/tmp.fMdE6GcQs6 /tmp/tmp.GpmopaG1fU +++ return 0 ++ for instance in $(kubectl_bin get pods --no-headers -l app.kubernetes.io/name=percona-server-mongodb --output=custom-columns='NAME:.metadata.name') ++ nodeList+=($(kubectl_bin exec -n "$namespace" $instance -c pmm-client -- pmm-admin status --json | jq -r '.pmm_agent_status.node_id')) +++ kubectl_bin exec -n monitoring-2-0-3412 monitoring-cfg-2 -c pmm-client -- pmm-admin status --json +++ jq -r .pmm_agent_status.node_id ++++ mktemp +++ local LAST_OUT=/tmp/tmp.tcjEfgjITa ++++ mktemp +++ local LAST_ERR=/tmp/tmp.daio9auXYR +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec -n monitoring-2-0-3412 monitoring-cfg-2 -c pmm-client -- pmm-admin status --json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.tcjEfgjITa +++ cat /tmp/tmp.daio9auXYR +++ rm /tmp/tmp.tcjEfgjITa /tmp/tmp.daio9auXYR +++ return 0 ++ for instance in $(kubectl_bin get pods --no-headers -l app.kubernetes.io/name=percona-server-mongodb --output=custom-columns='NAME:.metadata.name') ++ nodeList+=($(kubectl_bin exec -n "$namespace" $instance -c pmm-client -- pmm-admin status --json | jq -r '.pmm_agent_status.node_id')) +++ kubectl_bin exec -n monitoring-2-0-3412 monitoring-mongos-0 -c pmm-client -- pmm-admin status --json +++ jq -r .pmm_agent_status.node_id ++++ mktemp +++ local LAST_OUT=/tmp/tmp.erApm0jIue ++++ mktemp +++ local LAST_ERR=/tmp/tmp.euHgvRR9PS +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec -n 
monitoring-2-0-3412 monitoring-mongos-0 -c pmm-client -- pmm-admin status --json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.erApm0jIue +++ cat /tmp/tmp.euHgvRR9PS +++ rm /tmp/tmp.erApm0jIue /tmp/tmp.euHgvRR9PS +++ return 0 ++ for instance in $(kubectl_bin get pods --no-headers -l app.kubernetes.io/name=percona-server-mongodb --output=custom-columns='NAME:.metadata.name') ++ nodeList+=($(kubectl_bin exec -n "$namespace" $instance -c pmm-client -- pmm-admin status --json | jq -r '.pmm_agent_status.node_id')) +++ kubectl_bin exec -n monitoring-2-0-3412 monitoring-mongos-1 -c pmm-client -- pmm-admin status --json +++ jq -r .pmm_agent_status.node_id ++++ mktemp +++ local LAST_OUT=/tmp/tmp.kLLsNDCyLj ++++ mktemp +++ local LAST_ERR=/tmp/tmp.93wCZRsSKk +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec -n monitoring-2-0-3412 monitoring-mongos-1 -c pmm-client -- pmm-admin status --json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.kLLsNDCyLj +++ cat /tmp/tmp.93wCZRsSKk +++ rm /tmp/tmp.kLLsNDCyLj /tmp/tmp.93wCZRsSKk +++ return 0 ++ for instance in $(kubectl_bin get pods --no-headers -l app.kubernetes.io/name=percona-server-mongodb --output=custom-columns='NAME:.metadata.name') ++ nodeList+=($(kubectl_bin exec -n "$namespace" $instance -c pmm-client -- pmm-admin status --json | jq -r '.pmm_agent_status.node_id')) +++ kubectl_bin exec -n monitoring-2-0-3412 monitoring-mongos-2 -c pmm-client -- pmm-admin status --json +++ jq -r .pmm_agent_status.node_id ++++ mktemp +++ local LAST_OUT=/tmp/tmp.IwWS5bCc7K ++++ mktemp +++ local LAST_ERR=/tmp/tmp.BcQR1ri9fI +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec -n monitoring-2-0-3412 monitoring-mongos-2 -c pmm-client -- pmm-admin status --json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.IwWS5bCc7K +++ cat /tmp/tmp.BcQR1ri9fI +++ rm /tmp/tmp.IwWS5bCc7K /tmp/tmp.BcQR1ri9fI +++ return 0 ++ for instance in $(kubectl_bin get pods --no-headers -l app.kubernetes.io/name=percona-server-mongodb --output=custom-columns='NAME:.metadata.name') ++ nodeList+=($(kubectl_bin exec -n "$namespace" $instance -c pmm-client -- pmm-admin status --json | jq -r '.pmm_agent_status.node_id')) +++ kubectl_bin exec -n monitoring-2-0-3412 monitoring-rs0-0 -c pmm-client -- pmm-admin status --json +++ jq -r .pmm_agent_status.node_id ++++ mktemp +++ local LAST_OUT=/tmp/tmp.UDow06Hu7B ++++ mktemp +++ local LAST_ERR=/tmp/tmp.ii7ww64dMH +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec -n monitoring-2-0-3412 monitoring-rs0-0 -c pmm-client -- pmm-admin status --json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.UDow06Hu7B +++ cat /tmp/tmp.ii7ww64dMH +++ rm /tmp/tmp.UDow06Hu7B /tmp/tmp.ii7ww64dMH +++ return 0 ++ for instance in $(kubectl_bin get pods --no-headers -l app.kubernetes.io/name=percona-server-mongodb --output=custom-columns='NAME:.metadata.name') ++ nodeList+=($(kubectl_bin exec -n "$namespace" $instance -c pmm-client -- pmm-admin status --json | jq -r '.pmm_agent_status.node_id')) +++ kubectl_bin exec -n monitoring-2-0-3412 monitoring-rs0-1 -c pmm-client -- pmm-admin status --json +++ jq -r .pmm_agent_status.node_id ++++ mktemp +++ local LAST_OUT=/tmp/tmp.DcR0WdtIL5 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.KBqJ9cfwFN +++ local exit_status=0 +++ local 
timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec -n monitoring-2-0-3412 monitoring-rs0-1 -c pmm-client -- pmm-admin status --json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.DcR0WdtIL5 +++ cat /tmp/tmp.KBqJ9cfwFN +++ rm /tmp/tmp.DcR0WdtIL5 /tmp/tmp.KBqJ9cfwFN +++ return 0 ++ for instance in $(kubectl_bin get pods --no-headers -l app.kubernetes.io/name=percona-server-mongodb --output=custom-columns='NAME:.metadata.name') ++ nodeList+=($(kubectl_bin exec -n "$namespace" $instance -c pmm-client -- pmm-admin status --json | jq -r '.pmm_agent_status.node_id')) +++ kubectl_bin exec -n monitoring-2-0-3412 monitoring-rs0-2 -c pmm-client -- pmm-admin status --json +++ jq -r .pmm_agent_status.node_id ++++ mktemp +++ local LAST_OUT=/tmp/tmp.FbYFNWsdbb ++++ mktemp +++ local LAST_ERR=/tmp/tmp.qr4km6EIjp +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec -n monitoring-2-0-3412 monitoring-rs0-2 -c pmm-client -- pmm-admin status --json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.FbYFNWsdbb +++ cat /tmp/tmp.qr4km6EIjp +++ rm /tmp/tmp.FbYFNWsdbb /tmp/tmp.qr4km6EIjp +++ return 0 ++ echo /node_id/7933cf1b-7f0b-4e47-8a62-ad50f0c7321a /node_id/a0ae6cb6-13dd-45b9-9c59-9bbdbfcfcfb9 /node_id/de7409af-b80c-4981-9752-00e6a5f0c874 /node_id/99c14fbc-e750-4f85-acd1-73fac56064b1 /node_id/aa87eadd-c501-443e-8773-4238c39fabb7 /node_id/18d9ad9f-2728-4603-a031-f258532c99a2 /node_id/a4afe891-4de1-4484-82a8-c4ee39ac64f9 /node_id/c94ff103-19b1-4fb6-bd7b-1ebce5224fd8 /node_id/09304519-8a93-4629-9d44-4531f5b8fadd + nodeList_from_pmm=($(does_node_id_exists "${nodeList[@]}")) ++ does_node_id_exists /node_id/7933cf1b-7f0b-4e47-8a62-ad50f0c7321a /node_id/a0ae6cb6-13dd-45b9-9c59-9bbdbfcfcfb9 /node_id/de7409af-b80c-4981-9752-00e6a5f0c874 /node_id/99c14fbc-e750-4f85-acd1-73fac56064b1 /node_id/aa87eadd-c501-443e-8773-4238c39fabb7 /node_id/18d9ad9f-2728-4603-a031-f258532c99a2 /node_id/a4afe891-4de1-4484-82a8-c4ee39ac64f9 /node_id/c94ff103-19b1-4fb6-bd7b-1ebce5224fd8 /node_id/09304519-8a93-4629-9d44-4531f5b8fadd ++ nodeList=('/node_id/7933cf1b-7f0b-4e47-8a62-ad50f0c7321a' '/node_id/a0ae6cb6-13dd-45b9-9c59-9bbdbfcfcfb9' '/node_id/de7409af-b80c-4981-9752-00e6a5f0c874' '/node_id/99c14fbc-e750-4f85-acd1-73fac56064b1' '/node_id/aa87eadd-c501-443e-8773-4238c39fabb7' '/node_id/18d9ad9f-2728-4603-a031-f258532c99a2' '/node_id/a4afe891-4de1-4484-82a8-c4ee39ac64f9' '/node_id/c94ff103-19b1-4fb6-bd7b-1ebce5224fd8' '/node_id/09304519-8a93-4629-9d44-4531f5b8fadd') ++ local -a nodeList ++ nodeList_from_pmm=() ++ local -a nodeList_from_pmm ++ for node_id in "${nodeList[@]}" ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-0 -- pmm-admin --server-url=https://admin:admin@$(get_pmm_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ awk '{print $4}' +++ grep /node_id/7933cf1b-7f0b-4e47-8a62-ad50f0c7321a ++++ get_pmm_service_ip monitoring-service ++++ local service=monitoring-service ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.spec.type}' ++++ grep -q NotFound ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[]}' ++++ egrep -q 'hostname|ip' egrep: warning: egrep is obsolescent; using grep -E ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' +++++ mktemp ++++ local 
LAST_OUT=/tmp/tmp.oBNVOCo0qc +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.n6XKEtIza9 ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.oBNVOCo0qc ++++ cat /tmp/tmp.n6XKEtIza9 ++++ rm /tmp/tmp.oBNVOCo0qc /tmp/tmp.n6XKEtIza9 ++++ return 0 ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.PY8UJ33c7l +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.19K05vmYSx ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.PY8UJ33c7l ++++ cat /tmp/tmp.19K05vmYSx ++++ rm /tmp/tmp.PY8UJ33c7l /tmp/tmp.19K05vmYSx ++++ return 0 +++ kubectl_bin exec -n monitoring-2-0-3412 monitoring-0 -- pmm-admin --server-url=https://admin:admin@35.202.20.27/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.DlcA7GXxKn ++++ mktemp +++ local LAST_ERR=/tmp/tmp.3EUoVAIbyD +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec -n monitoring-2-0-3412 monitoring-0 -- pmm-admin --server-url=https://admin:admin@35.202.20.27/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.DlcA7GXxKn +++ cat /tmp/tmp.3EUoVAIbyD +++ rm /tmp/tmp.DlcA7GXxKn /tmp/tmp.3EUoVAIbyD +++ return 0 ++ for node_id in "${nodeList[@]}" ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-0 -- pmm-admin --server-url=https://admin:admin@$(get_pmm_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ grep /node_id/a0ae6cb6-13dd-45b9-9c59-9bbdbfcfcfb9 +++ awk '{print $4}' ++++ get_pmm_service_ip monitoring-service ++++ local service=monitoring-service ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.spec.type}' ++++ grep -q NotFound ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[]}' ++++ egrep -q 'hostname|ip' egrep: warning: egrep is obsolescent; using grep -E ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.kvjF8Kff0e +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.7cvmq7ax7G ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.kvjF8Kff0e ++++ cat /tmp/tmp.7cvmq7ax7G ++++ rm /tmp/tmp.kvjF8Kff0e /tmp/tmp.7cvmq7ax7G ++++ return 0 ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.OSZ7fE7TKo +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.Cn5MysO0sJ ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' ++++ 
exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.OSZ7fE7TKo ++++ cat /tmp/tmp.Cn5MysO0sJ ++++ rm /tmp/tmp.OSZ7fE7TKo /tmp/tmp.Cn5MysO0sJ ++++ return 0 +++ kubectl_bin exec -n monitoring-2-0-3412 monitoring-0 -- pmm-admin --server-url=https://admin:admin@35.202.20.27/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.uoj0QsXRlv ++++ mktemp +++ local LAST_ERR=/tmp/tmp.heAMWw3aGu +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec -n monitoring-2-0-3412 monitoring-0 -- pmm-admin --server-url=https://admin:admin@35.202.20.27/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.uoj0QsXRlv +++ cat /tmp/tmp.heAMWw3aGu +++ rm /tmp/tmp.uoj0QsXRlv /tmp/tmp.heAMWw3aGu +++ return 0 ++ for node_id in "${nodeList[@]}" ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-0 -- pmm-admin --server-url=https://admin:admin@$(get_pmm_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ grep /node_id/de7409af-b80c-4981-9752-00e6a5f0c874 +++ awk '{print $4}' ++++ get_pmm_service_ip monitoring-service ++++ local service=monitoring-service ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.spec.type}' ++++ grep -q NotFound ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[]}' ++++ egrep -q 'hostname|ip' egrep: warning: egrep is obsolescent; using grep -E ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.aqjYu354SP +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.usZ7jvwxE9 ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.aqjYu354SP ++++ cat /tmp/tmp.usZ7jvwxE9 ++++ rm /tmp/tmp.aqjYu354SP /tmp/tmp.usZ7jvwxE9 ++++ return 0 ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.HXwOBxj5h8 +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.2tPauK9RXX ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.HXwOBxj5h8 ++++ cat /tmp/tmp.2tPauK9RXX ++++ rm /tmp/tmp.HXwOBxj5h8 /tmp/tmp.2tPauK9RXX ++++ return 0 +++ kubectl_bin exec -n monitoring-2-0-3412 monitoring-0 -- pmm-admin --server-url=https://admin:admin@35.202.20.27/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.yaLZ0tDFsz ++++ mktemp +++ local LAST_ERR=/tmp/tmp.1l4Juwi7rL +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec -n monitoring-2-0-3412 monitoring-0 -- pmm-admin --server-url=https://admin:admin@35.202.20.27/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.yaLZ0tDFsz +++ cat /tmp/tmp.1l4Juwi7rL +++ rm 
/tmp/tmp.yaLZ0tDFsz /tmp/tmp.1l4Juwi7rL +++ return 0 ++ for node_id in "${nodeList[@]}" ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-0 -- pmm-admin --server-url=https://admin:admin@$(get_pmm_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ grep /node_id/99c14fbc-e750-4f85-acd1-73fac56064b1 +++ awk '{print $4}' ++++ get_pmm_service_ip monitoring-service ++++ local service=monitoring-service ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.spec.type}' ++++ grep -q NotFound ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[]}' ++++ egrep -q 'hostname|ip' egrep: warning: egrep is obsolescent; using grep -E ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.p0h6Q3lwAm +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.Hy6FZ0yqv4 ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.p0h6Q3lwAm ++++ cat /tmp/tmp.Hy6FZ0yqv4 ++++ rm /tmp/tmp.p0h6Q3lwAm /tmp/tmp.Hy6FZ0yqv4 ++++ return 0 ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.MRm6QDsgfw +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.tPX0iS8Uvo ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.MRm6QDsgfw ++++ cat /tmp/tmp.tPX0iS8Uvo ++++ rm /tmp/tmp.MRm6QDsgfw /tmp/tmp.tPX0iS8Uvo ++++ return 0 +++ kubectl_bin exec -n monitoring-2-0-3412 monitoring-0 -- pmm-admin --server-url=https://admin:admin@35.202.20.27/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.hMMJAhLdjg ++++ mktemp +++ local LAST_ERR=/tmp/tmp.cYyao8TrGY +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec -n monitoring-2-0-3412 monitoring-0 -- pmm-admin --server-url=https://admin:admin@35.202.20.27/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.hMMJAhLdjg +++ cat /tmp/tmp.cYyao8TrGY +++ rm /tmp/tmp.hMMJAhLdjg /tmp/tmp.cYyao8TrGY +++ return 0 ++ for node_id in "${nodeList[@]}" ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-0 -- pmm-admin --server-url=https://admin:admin@$(get_pmm_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ awk '{print $4}' +++ grep /node_id/aa87eadd-c501-443e-8773-4238c39fabb7 ++++ get_pmm_service_ip monitoring-service ++++ local service=monitoring-service ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.spec.type}' ++++ grep -q NotFound ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[]}' ++++ egrep -q 'hostname|ip' egrep: warning: egrep is obsolescent; using grep -E ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' +++++ 
mktemp ++++ local LAST_OUT=/tmp/tmp.N6qzLZwOyf +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.xx5BhHgp5p ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.N6qzLZwOyf ++++ cat /tmp/tmp.xx5BhHgp5p ++++ rm /tmp/tmp.N6qzLZwOyf /tmp/tmp.xx5BhHgp5p ++++ return 0 ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.2fXyCqYrNW +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.NTm3OcWGxT ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.2fXyCqYrNW ++++ cat /tmp/tmp.NTm3OcWGxT ++++ rm /tmp/tmp.2fXyCqYrNW /tmp/tmp.NTm3OcWGxT ++++ return 0 +++ kubectl_bin exec -n monitoring-2-0-3412 monitoring-0 -- pmm-admin --server-url=https://admin:admin@35.202.20.27/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.QGSMCYhq1v ++++ mktemp +++ local LAST_ERR=/tmp/tmp.QILcJPmnfE +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec -n monitoring-2-0-3412 monitoring-0 -- pmm-admin --server-url=https://admin:admin@35.202.20.27/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.QGSMCYhq1v +++ cat /tmp/tmp.QILcJPmnfE +++ rm /tmp/tmp.QGSMCYhq1v /tmp/tmp.QILcJPmnfE +++ return 0 ++ for node_id in "${nodeList[@]}" ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-0 -- pmm-admin --server-url=https://admin:admin@$(get_pmm_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ grep /node_id/18d9ad9f-2728-4603-a031-f258532c99a2 +++ awk '{print $4}' ++++ get_pmm_service_ip monitoring-service ++++ local service=monitoring-service ++++ grep -q NotFound ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.spec.type}' ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[]}' ++++ egrep -q 'hostname|ip' egrep: warning: egrep is obsolescent; using grep -E ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.KM5aVNX3id +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.xz5NeGge4B ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.KM5aVNX3id ++++ cat /tmp/tmp.xz5NeGge4B ++++ rm /tmp/tmp.KM5aVNX3id /tmp/tmp.xz5NeGge4B ++++ return 0 ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.VdiBv01fou +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.uX0l7PWP67 ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o 
'jsonpath={.status.loadBalancer.ingress[].hostname}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.VdiBv01fou ++++ cat /tmp/tmp.uX0l7PWP67 ++++ rm /tmp/tmp.VdiBv01fou /tmp/tmp.uX0l7PWP67 ++++ return 0 +++ kubectl_bin exec -n monitoring-2-0-3412 monitoring-0 -- pmm-admin --server-url=https://admin:admin@35.202.20.27/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.Z5WtgPiDhD ++++ mktemp +++ local LAST_ERR=/tmp/tmp.KkckOnN3v1 +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec -n monitoring-2-0-3412 monitoring-0 -- pmm-admin --server-url=https://admin:admin@35.202.20.27/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.Z5WtgPiDhD +++ cat /tmp/tmp.KkckOnN3v1 +++ rm /tmp/tmp.Z5WtgPiDhD /tmp/tmp.KkckOnN3v1 +++ return 0 ++ for node_id in "${nodeList[@]}" ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-0 -- pmm-admin --server-url=https://admin:admin@$(get_pmm_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ grep /node_id/a4afe891-4de1-4484-82a8-c4ee39ac64f9 +++ awk '{print $4}' ++++ get_pmm_service_ip monitoring-service ++++ local service=monitoring-service ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.spec.type}' ++++ grep -q NotFound ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[]}' ++++ egrep -q 'hostname|ip' egrep: warning: egrep is obsolescent; using grep -E ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.8bIEHBRvC3 +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.edeC8d0Ldl ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.8bIEHBRvC3 ++++ cat /tmp/tmp.edeC8d0Ldl ++++ rm /tmp/tmp.8bIEHBRvC3 /tmp/tmp.edeC8d0Ldl ++++ return 0 ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.C3OxWnAZKZ +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.P32aibvx2N ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.C3OxWnAZKZ ++++ cat /tmp/tmp.P32aibvx2N ++++ rm /tmp/tmp.C3OxWnAZKZ /tmp/tmp.P32aibvx2N ++++ return 0 +++ kubectl_bin exec -n monitoring-2-0-3412 monitoring-0 -- pmm-admin --server-url=https://admin:admin@35.202.20.27/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.bLGhilJsGK ++++ mktemp +++ local LAST_ERR=/tmp/tmp.xWefK0HtwG +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec -n monitoring-2-0-3412 monitoring-0 -- pmm-admin --server-url=https://admin:admin@35.202.20.27/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat 
/tmp/tmp.bLGhilJsGK +++ cat /tmp/tmp.xWefK0HtwG +++ rm /tmp/tmp.bLGhilJsGK /tmp/tmp.xWefK0HtwG +++ return 0 ++ for node_id in "${nodeList[@]}" ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-0 -- pmm-admin --server-url=https://admin:admin@$(get_pmm_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ grep /node_id/c94ff103-19b1-4fb6-bd7b-1ebce5224fd8 +++ awk '{print $4}' ++++ get_pmm_service_ip monitoring-service ++++ local service=monitoring-service ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.spec.type}' ++++ grep -q NotFound ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[]}' ++++ egrep -q 'hostname|ip' egrep: warning: egrep is obsolescent; using grep -E ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.62vKBtQT4H +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.NMjO2faruJ ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.62vKBtQT4H ++++ cat /tmp/tmp.NMjO2faruJ ++++ rm /tmp/tmp.62vKBtQT4H /tmp/tmp.NMjO2faruJ ++++ return 0 ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.bpgEJHWalM +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.INjCWF0XVj ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.bpgEJHWalM ++++ cat /tmp/tmp.INjCWF0XVj ++++ rm /tmp/tmp.bpgEJHWalM /tmp/tmp.INjCWF0XVj ++++ return 0 +++ kubectl_bin exec -n monitoring-2-0-3412 monitoring-0 -- pmm-admin --server-url=https://admin:admin@35.202.20.27/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.eqkhw88Vlc ++++ mktemp +++ local LAST_ERR=/tmp/tmp.O2C6uLnjrG +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec -n monitoring-2-0-3412 monitoring-0 -- pmm-admin --server-url=https://admin:admin@35.202.20.27/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.eqkhw88Vlc +++ cat /tmp/tmp.O2C6uLnjrG +++ rm /tmp/tmp.eqkhw88Vlc /tmp/tmp.O2C6uLnjrG +++ return 0 ++ for node_id in "${nodeList[@]}" ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-0 -- pmm-admin --server-url=https://admin:admin@$(get_pmm_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ grep /node_id/09304519-8a93-4629-9d44-4531f5b8fadd +++ awk '{print $4}' ++++ get_pmm_service_ip monitoring-service ++++ local service=monitoring-service ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.spec.type}' ++++ grep -q NotFound ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[]}' ++++ egrep -q 'hostname|ip' egrep: warning: egrep is obsolescent; using grep -E ++++ kubectl_bin get service/monitoring-service 
-o 'jsonpath={.status.loadBalancer.ingress[].ip}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.X23yMkzeHA +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.aODdPv89bi ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.X23yMkzeHA ++++ cat /tmp/tmp.aODdPv89bi ++++ rm /tmp/tmp.X23yMkzeHA /tmp/tmp.aODdPv89bi ++++ return 0 ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.dn54PZJunm +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.EevSr7gLmL ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.dn54PZJunm ++++ cat /tmp/tmp.EevSr7gLmL ++++ rm /tmp/tmp.dn54PZJunm /tmp/tmp.EevSr7gLmL ++++ return 0 +++ kubectl_bin exec -n monitoring-2-0-3412 monitoring-0 -- pmm-admin --server-url=https://admin:admin@35.202.20.27/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.6IZiFTYcRS ++++ mktemp +++ local LAST_ERR=/tmp/tmp.N76eJ6P50B +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec -n monitoring-2-0-3412 monitoring-0 -- pmm-admin --server-url=https://admin:admin@35.202.20.27/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.6IZiFTYcRS +++ cat /tmp/tmp.N76eJ6P50B +++ rm /tmp/tmp.6IZiFTYcRS /tmp/tmp.N76eJ6P50B +++ return 0 ++ echo /node_id/7933cf1b-7f0b-4e47-8a62-ad50f0c7321a /node_id/a0ae6cb6-13dd-45b9-9c59-9bbdbfcfcfb9 /node_id/de7409af-b80c-4981-9752-00e6a5f0c874 /node_id/99c14fbc-e750-4f85-acd1-73fac56064b1 /node_id/aa87eadd-c501-443e-8773-4238c39fabb7 /node_id/18d9ad9f-2728-4603-a031-f258532c99a2 /node_id/a4afe891-4de1-4484-82a8-c4ee39ac64f9 /node_id/c94ff103-19b1-4fb6-bd7b-1ebce5224fd8 /node_id/09304519-8a93-4629-9d44-4531f5b8fadd + for node_id in "${nodeList_from_pmm[@]}" + '[' -z /node_id/7933cf1b-7f0b-4e47-8a62-ad50f0c7321a ']' + for node_id in "${nodeList_from_pmm[@]}" + '[' -z /node_id/a0ae6cb6-13dd-45b9-9c59-9bbdbfcfcfb9 ']' + for node_id in "${nodeList_from_pmm[@]}" + '[' -z /node_id/de7409af-b80c-4981-9752-00e6a5f0c874 ']' + for node_id in "${nodeList_from_pmm[@]}" + '[' -z /node_id/99c14fbc-e750-4f85-acd1-73fac56064b1 ']' + for node_id in "${nodeList_from_pmm[@]}" + '[' -z /node_id/aa87eadd-c501-443e-8773-4238c39fabb7 ']' + for node_id in "${nodeList_from_pmm[@]}" + '[' -z /node_id/18d9ad9f-2728-4603-a031-f258532c99a2 ']' + for node_id in "${nodeList_from_pmm[@]}" + '[' -z /node_id/a4afe891-4de1-4484-82a8-c4ee39ac64f9 ']' + for node_id in "${nodeList_from_pmm[@]}" + '[' -z /node_id/c94ff103-19b1-4fb6-bd7b-1ebce5224fd8 ']' + for node_id in "${nodeList_from_pmm[@]}" + '[' -z /node_id/09304519-8a93-4629-9d44-4531f5b8fadd ']' + kubectl_bin patch psmdb monitoring --type json '-p=[{"op":"add","path":"/spec/pause","value":true}]' ++ mktemp + local LAST_OUT=/tmp/tmp.iiMzZLzzw6 ++ mktemp + local LAST_ERR=/tmp/tmp.I1kHQ7HtFE + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl patch psmdb monitoring --type json 
'-p=[{"op":"add","path":"/spec/pause","value":true}]' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.iiMzZLzzw6 perconaservermongodb.psmdb.percona.com/monitoring patched + cat /tmp/tmp.I1kHQ7HtFE + rm /tmp/tmp.iiMzZLzzw6 /tmp/tmp.I1kHQ7HtFE + return 0 + wait_for_delete pod/monitoring-mongos-0 + local res=pod/monitoring-mongos-0 + local wait_time=60 + set +o xtrace waiting for pod/monitoring-mongos-0 to be deleted...........................Error from server (NotFound): pods "monitoring-mongos-0" not found Error from server (NotFound): pods "monitoring-mongos-0" not found Error from server (NotFound): pods "monitoring-mongos-0" not found Error from server (NotFound): pods "monitoring-mongos-0" not found + wait_for_delete pod/monitoring-rs0-0 + local res=pod/monitoring-rs0-0 + local wait_time=60 + set +o xtrace waiting for pod/monitoring-rs0-0 to be deleted...........Error from server (NotFound): pods "monitoring-rs0-0" not found Error from server (NotFound): pods "monitoring-rs0-0" not found Error from server (NotFound): pods "monitoring-rs0-0" not found Error from server (NotFound): pods "monitoring-rs0-0" not found + wait_for_delete pod/monitoring-cfg-0 + local res=pod/monitoring-cfg-0 + local wait_time=60 + set +o xtrace waiting for pod/monitoring-cfg-0 to be deleted......Error from server (NotFound): pods "monitoring-cfg-0" not found Error from server (NotFound): pods "monitoring-cfg-0" not found Error from server (NotFound): pods "monitoring-cfg-0" not found Error from server (NotFound): pods "monitoring-cfg-0" not found + desc 'check if services are not deleted' + set +o xtrace ----------------------------------------------------------------------------------- check if services are not deleted ----------------------------------------------------------------------------------- + kubectl_bin get svc monitoring-rs0 ++ mktemp + local LAST_OUT=/tmp/tmp.EtXoUHBgky ++ mktemp + local LAST_ERR=/tmp/tmp.OpawjL4Ejd + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get svc monitoring-rs0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.EtXoUHBgky NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE monitoring-rs0 ClusterIP None 27019/TCP 14m + cat /tmp/tmp.OpawjL4Ejd + rm /tmp/tmp.EtXoUHBgky /tmp/tmp.OpawjL4Ejd + return 0 + kubectl_bin get svc monitoring-cfg ++ mktemp + local LAST_OUT=/tmp/tmp.Z72wtu303s ++ mktemp + local LAST_ERR=/tmp/tmp.PGaybS6dIx + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get svc monitoring-cfg + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Z72wtu303s NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE monitoring-cfg ClusterIP None 27019/TCP 14m + cat /tmp/tmp.PGaybS6dIx + rm /tmp/tmp.Z72wtu303s /tmp/tmp.PGaybS6dIx + return 0 + kubectl_bin get svc monitoring-mongos ++ mktemp + local LAST_OUT=/tmp/tmp.t9oqNJFS7A ++ mktemp + local LAST_ERR=/tmp/tmp.NFHuvQJHHX + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get svc monitoring-mongos + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.t9oqNJFS7A NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE monitoring-mongos ClusterIP 34.118.228.159 27019/TCP 14m + cat /tmp/tmp.NFHuvQJHHX + rm /tmp/tmp.t9oqNJFS7A /tmp/tmp.NFHuvQJHHX + return 0 + does_node_id_exists_in_pmm=($(does_node_id_exists "${nodeList[@]}")) ++ does_node_id_exists /node_id/7933cf1b-7f0b-4e47-8a62-ad50f0c7321a /node_id/a0ae6cb6-13dd-45b9-9c59-9bbdbfcfcfb9 
/node_id/de7409af-b80c-4981-9752-00e6a5f0c874 /node_id/99c14fbc-e750-4f85-acd1-73fac56064b1 /node_id/aa87eadd-c501-443e-8773-4238c39fabb7 /node_id/18d9ad9f-2728-4603-a031-f258532c99a2 /node_id/a4afe891-4de1-4484-82a8-c4ee39ac64f9 /node_id/c94ff103-19b1-4fb6-bd7b-1ebce5224fd8 /node_id/09304519-8a93-4629-9d44-4531f5b8fadd ++ nodeList=('/node_id/7933cf1b-7f0b-4e47-8a62-ad50f0c7321a' '/node_id/a0ae6cb6-13dd-45b9-9c59-9bbdbfcfcfb9' '/node_id/de7409af-b80c-4981-9752-00e6a5f0c874' '/node_id/99c14fbc-e750-4f85-acd1-73fac56064b1' '/node_id/aa87eadd-c501-443e-8773-4238c39fabb7' '/node_id/18d9ad9f-2728-4603-a031-f258532c99a2' '/node_id/a4afe891-4de1-4484-82a8-c4ee39ac64f9' '/node_id/c94ff103-19b1-4fb6-bd7b-1ebce5224fd8' '/node_id/09304519-8a93-4629-9d44-4531f5b8fadd') ++ local -a nodeList ++ nodeList_from_pmm=() ++ local -a nodeList_from_pmm ++ for node_id in "${nodeList[@]}" ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-0 -- pmm-admin --server-url=https://admin:admin@$(get_pmm_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ grep /node_id/7933cf1b-7f0b-4e47-8a62-ad50f0c7321a +++ awk '{print $4}' ++++ get_pmm_service_ip monitoring-service ++++ local service=monitoring-service ++++ grep -q NotFound ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.spec.type}' ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[]}' ++++ egrep -q 'hostname|ip' egrep: warning: egrep is obsolescent; using grep -E ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.b5kT46ILzJ +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.HzJwlBwLSj ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.b5kT46ILzJ ++++ cat /tmp/tmp.HzJwlBwLSj ++++ rm /tmp/tmp.b5kT46ILzJ /tmp/tmp.HzJwlBwLSj ++++ return 0 ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.iyVUlGsJuy +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.Lkhvees1fi ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.iyVUlGsJuy ++++ cat /tmp/tmp.Lkhvees1fi ++++ rm /tmp/tmp.iyVUlGsJuy /tmp/tmp.Lkhvees1fi ++++ return 0 +++ kubectl_bin exec -n monitoring-2-0-3412 monitoring-0 -- pmm-admin --server-url=https://admin:admin@35.202.20.27/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.siFblEdS6P ++++ mktemp +++ local LAST_ERR=/tmp/tmp.S4vEdV15vM +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec -n monitoring-2-0-3412 monitoring-0 -- pmm-admin --server-url=https://admin:admin@35.202.20.27/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.siFblEdS6P +++ cat /tmp/tmp.S4vEdV15vM +++ rm /tmp/tmp.siFblEdS6P /tmp/tmp.S4vEdV15vM +++ return 0 ++ for node_id in "${nodeList[@]}" ++ 
nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-0 -- pmm-admin --server-url=https://admin:admin@$(get_pmm_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ grep /node_id/a0ae6cb6-13dd-45b9-9c59-9bbdbfcfcfb9 +++ awk '{print $4}' ++++ get_pmm_service_ip monitoring-service ++++ local service=monitoring-service ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.spec.type}' ++++ grep -q NotFound ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[]}' ++++ egrep -q 'hostname|ip' egrep: warning: egrep is obsolescent; using grep -E ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.Taa8aIzn83 +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.tRUY23qglS ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.Taa8aIzn83 ++++ cat /tmp/tmp.tRUY23qglS ++++ rm /tmp/tmp.Taa8aIzn83 /tmp/tmp.tRUY23qglS ++++ return 0 ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.TmdCb9QNkz +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.0AXNVRUCsR ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.TmdCb9QNkz ++++ cat /tmp/tmp.0AXNVRUCsR ++++ rm /tmp/tmp.TmdCb9QNkz /tmp/tmp.0AXNVRUCsR ++++ return 0 +++ kubectl_bin exec -n monitoring-2-0-3412 monitoring-0 -- pmm-admin --server-url=https://admin:admin@35.202.20.27/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.OBAjErvFNj ++++ mktemp +++ local LAST_ERR=/tmp/tmp.mNg87FxC7H +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec -n monitoring-2-0-3412 monitoring-0 -- pmm-admin --server-url=https://admin:admin@35.202.20.27/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.OBAjErvFNj +++ cat /tmp/tmp.mNg87FxC7H +++ rm /tmp/tmp.OBAjErvFNj /tmp/tmp.mNg87FxC7H +++ return 0 ++ for node_id in "${nodeList[@]}" ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-0 -- pmm-admin --server-url=https://admin:admin@$(get_pmm_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ grep /node_id/de7409af-b80c-4981-9752-00e6a5f0c874 ++++ get_pmm_service_ip monitoring-service ++++ local service=monitoring-service +++ awk '{print $4}' ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.spec.type}' ++++ grep -q NotFound ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[]}' ++++ egrep -q 'hostname|ip' egrep: warning: egrep is obsolescent; using grep -E ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.fTmUFdKzKf +++++ mktemp ++++ local 
LAST_ERR=/tmp/tmp.C8s80dqybN ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.fTmUFdKzKf ++++ cat /tmp/tmp.C8s80dqybN ++++ rm /tmp/tmp.fTmUFdKzKf /tmp/tmp.C8s80dqybN ++++ return 0 ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.CCoZUKrFnq +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.eoYbGpMRfd ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.CCoZUKrFnq ++++ cat /tmp/tmp.eoYbGpMRfd ++++ rm /tmp/tmp.CCoZUKrFnq /tmp/tmp.eoYbGpMRfd ++++ return 0 +++ kubectl_bin exec -n monitoring-2-0-3412 monitoring-0 -- pmm-admin --server-url=https://admin:admin@35.202.20.27/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.Frj3VXolaL ++++ mktemp +++ local LAST_ERR=/tmp/tmp.vzbCDPviqX +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec -n monitoring-2-0-3412 monitoring-0 -- pmm-admin --server-url=https://admin:admin@35.202.20.27/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.Frj3VXolaL +++ cat /tmp/tmp.vzbCDPviqX +++ rm /tmp/tmp.Frj3VXolaL /tmp/tmp.vzbCDPviqX +++ return 0 ++ for node_id in "${nodeList[@]}" ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-0 -- pmm-admin --server-url=https://admin:admin@$(get_pmm_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ grep /node_id/99c14fbc-e750-4f85-acd1-73fac56064b1 +++ awk '{print $4}' ++++ get_pmm_service_ip monitoring-service ++++ local service=monitoring-service ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.spec.type}' ++++ grep -q NotFound ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[]}' ++++ egrep -q 'hostname|ip' egrep: warning: egrep is obsolescent; using grep -E ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.m8WmFph9Ii +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.LKwl5Jdfbx ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.m8WmFph9Ii ++++ cat /tmp/tmp.LKwl5Jdfbx ++++ rm /tmp/tmp.m8WmFph9Ii /tmp/tmp.LKwl5Jdfbx ++++ return 0 ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.aws277zAcl +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.dpWa3RIY0s ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ 
break ++++ cat /tmp/tmp.aws277zAcl ++++ cat /tmp/tmp.dpWa3RIY0s ++++ rm /tmp/tmp.aws277zAcl /tmp/tmp.dpWa3RIY0s ++++ return 0 +++ kubectl_bin exec -n monitoring-2-0-3412 monitoring-0 -- pmm-admin --server-url=https://admin:admin@35.202.20.27/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.mgII1KJADR ++++ mktemp +++ local LAST_ERR=/tmp/tmp.9Y7WI6WBlh +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec -n monitoring-2-0-3412 monitoring-0 -- pmm-admin --server-url=https://admin:admin@35.202.20.27/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.mgII1KJADR +++ cat /tmp/tmp.9Y7WI6WBlh +++ rm /tmp/tmp.mgII1KJADR /tmp/tmp.9Y7WI6WBlh +++ return 0 ++ for node_id in "${nodeList[@]}" ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-0 -- pmm-admin --server-url=https://admin:admin@$(get_pmm_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ grep /node_id/aa87eadd-c501-443e-8773-4238c39fabb7 ++++ get_pmm_service_ip monitoring-service +++ awk '{print $4}' ++++ local service=monitoring-service ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.spec.type}' ++++ grep -q NotFound ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[]}' ++++ egrep -q 'hostname|ip' egrep: warning: egrep is obsolescent; using grep -E ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.JSS1l0ennC +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.mot7TQiZl8 ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.JSS1l0ennC ++++ cat /tmp/tmp.mot7TQiZl8 ++++ rm /tmp/tmp.JSS1l0ennC /tmp/tmp.mot7TQiZl8 ++++ return 0 ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.gC826UPGyT +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.4DdiNIoglB ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.gC826UPGyT ++++ cat /tmp/tmp.4DdiNIoglB ++++ rm /tmp/tmp.gC826UPGyT /tmp/tmp.4DdiNIoglB ++++ return 0 +++ kubectl_bin exec -n monitoring-2-0-3412 monitoring-0 -- pmm-admin --server-url=https://admin:admin@35.202.20.27/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.gzRVFFTX0o ++++ mktemp +++ local LAST_ERR=/tmp/tmp.qsBJjcMWPE +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec -n monitoring-2-0-3412 monitoring-0 -- pmm-admin --server-url=https://admin:admin@35.202.20.27/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.gzRVFFTX0o +++ cat /tmp/tmp.qsBJjcMWPE +++ rm /tmp/tmp.gzRVFFTX0o /tmp/tmp.qsBJjcMWPE +++ return 0 ++ for node_id 
in "${nodeList[@]}" ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-0 -- pmm-admin --server-url=https://admin:admin@$(get_pmm_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ grep /node_id/18d9ad9f-2728-4603-a031-f258532c99a2 +++ awk '{print $4}' ++++ get_pmm_service_ip monitoring-service ++++ local service=monitoring-service ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.spec.type}' ++++ grep -q NotFound ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[]}' ++++ egrep -q 'hostname|ip' egrep: warning: egrep is obsolescent; using grep -E ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.36hLDP9H1e +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.qqJzAcG91a ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.36hLDP9H1e ++++ cat /tmp/tmp.qqJzAcG91a ++++ rm /tmp/tmp.36hLDP9H1e /tmp/tmp.qqJzAcG91a ++++ return 0 ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.9wpqMzkRY7 +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.jFiOgT3jo4 ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.9wpqMzkRY7 ++++ cat /tmp/tmp.jFiOgT3jo4 ++++ rm /tmp/tmp.9wpqMzkRY7 /tmp/tmp.jFiOgT3jo4 ++++ return 0 +++ kubectl_bin exec -n monitoring-2-0-3412 monitoring-0 -- pmm-admin --server-url=https://admin:admin@35.202.20.27/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.kilrKT484X ++++ mktemp +++ local LAST_ERR=/tmp/tmp.iWE90ZJc7a +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec -n monitoring-2-0-3412 monitoring-0 -- pmm-admin --server-url=https://admin:admin@35.202.20.27/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.kilrKT484X +++ cat /tmp/tmp.iWE90ZJc7a +++ rm /tmp/tmp.kilrKT484X /tmp/tmp.iWE90ZJc7a +++ return 0 ++ for node_id in "${nodeList[@]}" ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-0 -- pmm-admin --server-url=https://admin:admin@$(get_pmm_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ grep /node_id/a4afe891-4de1-4484-82a8-c4ee39ac64f9 +++ awk '{print $4}' ++++ get_pmm_service_ip monitoring-service ++++ local service=monitoring-service ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.spec.type}' ++++ grep -q NotFound ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[]}' ++++ egrep -q 'hostname|ip' egrep: warning: egrep is obsolescent; using grep -E ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.bFcVZVsyji +++++ mktemp ++++ 
local LAST_ERR=/tmp/tmp.ddlMaBpn4z ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.bFcVZVsyji ++++ cat /tmp/tmp.ddlMaBpn4z ++++ rm /tmp/tmp.bFcVZVsyji /tmp/tmp.ddlMaBpn4z ++++ return 0 ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.T0IX2CypUX +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.iNTPHNFVrk ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.T0IX2CypUX ++++ cat /tmp/tmp.iNTPHNFVrk ++++ rm /tmp/tmp.T0IX2CypUX /tmp/tmp.iNTPHNFVrk ++++ return 0 +++ kubectl_bin exec -n monitoring-2-0-3412 monitoring-0 -- pmm-admin --server-url=https://admin:admin@35.202.20.27/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.0O48bvUEkq ++++ mktemp +++ local LAST_ERR=/tmp/tmp.4fPOnzTGCS +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec -n monitoring-2-0-3412 monitoring-0 -- pmm-admin --server-url=https://admin:admin@35.202.20.27/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.0O48bvUEkq +++ cat /tmp/tmp.4fPOnzTGCS +++ rm /tmp/tmp.0O48bvUEkq /tmp/tmp.4fPOnzTGCS +++ return 0 ++ for node_id in "${nodeList[@]}" ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-0 -- pmm-admin --server-url=https://admin:admin@$(get_pmm_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ grep /node_id/c94ff103-19b1-4fb6-bd7b-1ebce5224fd8 +++ awk '{print $4}' ++++ get_pmm_service_ip monitoring-service ++++ local service=monitoring-service ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.spec.type}' ++++ grep -q NotFound ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[]}' ++++ egrep -q 'hostname|ip' egrep: warning: egrep is obsolescent; using grep -E ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.724AFaIhwp +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.RQ4fma5CG4 ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.724AFaIhwp ++++ cat /tmp/tmp.RQ4fma5CG4 ++++ rm /tmp/tmp.724AFaIhwp /tmp/tmp.RQ4fma5CG4 ++++ return 0 ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.FAVDPD5Asx +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.F55FuJGXas ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' 
++++ break ++++ cat /tmp/tmp.FAVDPD5Asx ++++ cat /tmp/tmp.F55FuJGXas ++++ rm /tmp/tmp.FAVDPD5Asx /tmp/tmp.F55FuJGXas ++++ return 0 +++ kubectl_bin exec -n monitoring-2-0-3412 monitoring-0 -- pmm-admin --server-url=https://admin:admin@35.202.20.27/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.C3p2Dm1YAT ++++ mktemp +++ local LAST_ERR=/tmp/tmp.RfreoI9ZyJ +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec -n monitoring-2-0-3412 monitoring-0 -- pmm-admin --server-url=https://admin:admin@35.202.20.27/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.C3p2Dm1YAT +++ cat /tmp/tmp.RfreoI9ZyJ +++ rm /tmp/tmp.C3p2Dm1YAT /tmp/tmp.RfreoI9ZyJ +++ return 0 ++ for node_id in "${nodeList[@]}" ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-0 -- pmm-admin --server-url=https://admin:admin@$(get_pmm_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ grep /node_id/09304519-8a93-4629-9d44-4531f5b8fadd +++ awk '{print $4}' ++++ get_pmm_service_ip monitoring-service ++++ local service=monitoring-service ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.spec.type}' ++++ grep -q NotFound ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[]}' ++++ egrep -q 'hostname|ip' egrep: warning: egrep is obsolescent; using grep -E ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.jGrC7smHbQ +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.sP3uc5C7fF ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.jGrC7smHbQ ++++ cat /tmp/tmp.sP3uc5C7fF ++++ rm /tmp/tmp.jGrC7smHbQ /tmp/tmp.sP3uc5C7fF ++++ return 0 ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.hcA23UIWzd +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.tK5nOgn6aT ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.hcA23UIWzd ++++ cat /tmp/tmp.tK5nOgn6aT ++++ rm /tmp/tmp.hcA23UIWzd /tmp/tmp.tK5nOgn6aT ++++ return 0 +++ kubectl_bin exec -n monitoring-2-0-3412 monitoring-0 -- pmm-admin --server-url=https://admin:admin@35.202.20.27/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.gVv3ENw46u ++++ mktemp +++ local LAST_ERR=/tmp/tmp.psjgyaZJHm +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec -n monitoring-2-0-3412 monitoring-0 -- pmm-admin --server-url=https://admin:admin@35.202.20.27/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.gVv3ENw46u +++ cat /tmp/tmp.psjgyaZJHm +++ rm /tmp/tmp.gVv3ENw46u /tmp/tmp.psjgyaZJHm +++ return 0 ++ echo + 
desc 'check customClusterName for pmm' + set +o xtrace ----------------------------------------------------------------------------------- check customClusterName for pmm ----------------------------------------------------------------------------------- + custom_name=custom-cluster-name + kubectl_bin patch psmdb monitoring --type json '-p=[{"op":"add","path":"/spec/pause","value":false}, {"op":"add","path":"/spec/pmm/customClusterName","value":custom-cluster-name}]' ++ mktemp + local LAST_OUT=/tmp/tmp.IQO1seEOAZ ++ mktemp + local LAST_ERR=/tmp/tmp.rxbh8gkFpb + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl patch psmdb monitoring --type json '-p=[{"op":"add","path":"/spec/pause","value":false}, {"op":"add","path":"/spec/pmm/customClusterName","value":custom-cluster-name}]' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.IQO1seEOAZ perconaservermongodb.psmdb.percona.com/monitoring patched + cat /tmp/tmp.rxbh8gkFpb + rm /tmp/tmp.IQO1seEOAZ /tmp/tmp.rxbh8gkFpb + return 0 + wait_for_running monitoring-rs0 3 + local name=monitoring-rs0 + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=monitoring ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod monitoring-rs0-0 + local pod=monitoring-rs0-0 + set +o xtrace waiting for pod/monitoring-rs0-0 to be ready..........OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod monitoring-rs0-1 + local pod=monitoring-rs0-1 + set +o xtrace waiting for pod/monitoring-rs0-1 to be ready.........OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb monitoring -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.US9D9JiBLT +++ mktemp ++ local LAST_ERR=/tmp/tmp.rilbXnhFaN ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb monitoring -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.US9D9JiBLT ++ cat /tmp/tmp.rilbXnhFaN ++ rm /tmp/tmp.US9D9JiBLT /tmp/tmp.rilbXnhFaN ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod monitoring-rs0-2 + local pod=monitoring-rs0-2 + set +o xtrace waiting for pod/monitoring-rs0-2 to be ready..........OK ++ kubectl_bin get psmdb monitoring -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.0p4Mt9sMod +++ mktemp ++ local LAST_ERR=/tmp/tmp.GeZu6kh79T ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb monitoring -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.0p4Mt9sMod ++ cat /tmp/tmp.GeZu6kh79T ++ rm /tmp/tmp.0p4Mt9sMod /tmp/tmp.GeZu6kh79T ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb monitoring -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.MZlZlk0M3p +++ mktemp ++ local LAST_ERR=/tmp/tmp.kHT2cDoiuT ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb monitoring -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.MZlZlk0M3p ++ cat /tmp/tmp.kHT2cDoiuT ++ rm /tmp/tmp.MZlZlk0M3p /tmp/tmp.kHT2cDoiuT ++ return 0 + [[ '' == \t\r\u\e ]] + 
sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness....... ++ get_service_endpoint monitoring-service ++ local service=monitoring-service +++ kubectl_bin get service/monitoring-service -o json +++ jq '.status.loadBalancer.ingress[].hostname' +++ sed -e 's/^"//; s/"$//;' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.UbJ4R2y0W6 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.3eitVdNa3u +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.UbJ4R2y0W6 +++ cat /tmp/tmp.3eitVdNa3u +++ rm /tmp/tmp.UbJ4R2y0W6 /tmp/tmp.3eitVdNa3u +++ return 0 ++ local hostname=null ++ '[' -n null -a null '!=' null ']' +++ kubectl_bin get service/monitoring-service -o json +++ jq '.status.loadBalancer.ingress[].ip' +++ sed -e 's/^"//; s/"$//;' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.asib7eqn8C ++++ mktemp +++ local LAST_ERR=/tmp/tmp.7FK9CgT1bx +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.asib7eqn8C +++ cat /tmp/tmp.7FK9CgT1bx +++ rm /tmp/tmp.asib7eqn8C /tmp/tmp.7FK9CgT1bx +++ return 0 ++ local ip=35.202.20.27 ++ '[' -n 35.202.20.27 -a 35.202.20.27 '!=' null ']' ++ echo 35.202.20.27 ++ return + curl -s -k -d '{"service_type":"MONGODB_SERVICE"}' https://admin:admin@35.202.20.27/v1/inventory/Services/List + check_custom_cluster_name monitoring-2-0-3412-monitoring-mongos-0 /tmp/tmp.DuPpQJQlN6/pmm_service_list.json + local pod_service_name=monitoring-2-0-3412-monitoring-mongos-0 + local pmm_services_file=/tmp/tmp.DuPpQJQlN6/pmm_service_list.json + echo 'Checking monitoring-2-0-3412-monitoring-mongos-0' Checking monitoring-2-0-3412-monitoring-mongos-0 ++ jq -r '.mongodb[] | select(.service_name=="monitoring-2-0-3412-monitoring-mongos-0") | .cluster' /tmp/tmp.DuPpQJQlN6/pmm_service_list.json + pmm_service_cluster=custom-cluster-name + [[ custom-cluster-name != custom-cluster-name ]] + check_custom_cluster_name monitoring-2-0-3412-monitoring-rs0-0 /tmp/tmp.DuPpQJQlN6/pmm_service_list.json + local pod_service_name=monitoring-2-0-3412-monitoring-rs0-0 + local pmm_services_file=/tmp/tmp.DuPpQJQlN6/pmm_service_list.json + echo 'Checking monitoring-2-0-3412-monitoring-rs0-0' Checking monitoring-2-0-3412-monitoring-rs0-0 ++ jq -r '.mongodb[] | select(.service_name=="monitoring-2-0-3412-monitoring-rs0-0") | .cluster' /tmp/tmp.DuPpQJQlN6/pmm_service_list.json + pmm_service_cluster=custom-cluster-name + [[ custom-cluster-name != custom-cluster-name ]] + check_custom_cluster_name monitoring-2-0-3412-monitoring-cfg-0 /tmp/tmp.DuPpQJQlN6/pmm_service_list.json + local pod_service_name=monitoring-2-0-3412-monitoring-cfg-0 + local pmm_services_file=/tmp/tmp.DuPpQJQlN6/pmm_service_list.json + echo 'Checking monitoring-2-0-3412-monitoring-cfg-0' Checking monitoring-2-0-3412-monitoring-cfg-0 ++ jq -r '.mongodb[] | select(.service_name=="monitoring-2-0-3412-monitoring-cfg-0") | .cluster' /tmp/tmp.DuPpQJQlN6/pmm_service_list.json + pmm_service_cluster=custom-cluster-name + [[ custom-cluster-name != custom-cluster-name ]] + [[ -n '' ]] ++ kubectl_bin logs monitoring-rs0-0 pmm-client ++ grep -c 'cannot auto discover databases and collections' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ZXj4Ems4V6 +++ mktemp ++ local LAST_ERR=/tmp/tmp.5z3VMc8SSt ++ local exit_status=0 ++ local timeout=4 +++ seq 
0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl logs monitoring-rs0-0 pmm-client ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.ZXj4Ems4V6 ++ cat /tmp/tmp.5z3VMc8SSt ++ rm /tmp/tmp.ZXj4Ems4V6 /tmp/tmp.5z3VMc8SSt ++ return 0 + [[ 0 != 0 ]] + helm uninstall monitoring release "monitoring" uninstalled + destroy monitoring-2-0-3412 + local namespace=monitoring-2-0-3412 + local ignore_logs=true + [[ 0 == 1 ]] + desc 'destroy cluster/operator and all other resources' + set +o xtrace ----------------------------------------------------------------------------------- destroy cluster/operator and all other resources ----------------------------------------------------------------------------------- + '[' true == false ']' + delete_backups + desc 'Delete psmdb-backup' + set +o xtrace ----------------------------------------------------------------------------------- Delete psmdb-backup ----------------------------------------------------------------------------------- ++ kubectl_bin get psmdb-backup --no-headers ++ wc -l +++ mktemp ++ local LAST_OUT=/tmp/tmp.xRsHPcH3IU +++ mktemp ++ local LAST_ERR=/tmp/tmp.UTnALQReQm ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb-backup --no-headers ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.xRsHPcH3IU ++ cat /tmp/tmp.UTnALQReQm No resources found in monitoring-2-0-3412 namespace. ++ rm /tmp/tmp.xRsHPcH3IU /tmp/tmp.UTnALQReQm ++ return 0 + '[' 0 '!=' 0 ']' + delete_crd + desc 'get and delete old CRDs and RBAC' + set +o xtrace ----------------------------------------------------------------------------------- get and delete old CRDs and RBAC ----------------------------------------------------------------------------------- + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2129/deploy/crd.yaml --ignore-not-found --wait=false ++ mktemp + local LAST_OUT=/tmp/tmp.FMNP5Af5j3 ++ mktemp + local LAST_ERR=/tmp/tmp.vBt2ZgP4Xi + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2129/deploy/crd.yaml --ignore-not-found --wait=false + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.FMNP5Af5j3 customresourcedefinition.apiextensions.k8s.io "perconaservermongodbbackups.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbrestores.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbs.psmdb.percona.com" deleted + cat /tmp/tmp.vBt2ZgP4Xi + rm /tmp/tmp.FMNP5Af5j3 /tmp/tmp.vBt2ZgP4Xi + return 0 ++ yq eval .metadata.name /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2129/deploy/crd.yaml ++ grep -v '\-\-\-' grep: warning: stray \ before - grep: warning: stray \ before - + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + kubectl get perconaservermongodbbackups.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbbackups.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbbackups" + kubectl patch perconaservermongodbbackups.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbbackups" + : + kubectl_bin wait --for=delete crd 
perconaservermongodbbackups.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.1NhT2BK7zj ++ mktemp + local LAST_ERR=/tmp/tmp.HOVPcMV26s + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.1NhT2BK7zj + cat /tmp/tmp.HOVPcMV26s + rm /tmp/tmp.1NhT2BK7zj /tmp/tmp.HOVPcMV26s + return 0 + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + kubectl get perconaservermongodbrestores.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbrestores.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbrestores" + kubectl patch perconaservermongodbrestores.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbrestores" + : + kubectl_bin wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.GrsGvhbgI9 ++ mktemp + local LAST_ERR=/tmp/tmp.NAKi7CraBY + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.GrsGvhbgI9 + cat /tmp/tmp.NAKi7CraBY + rm /tmp/tmp.GrsGvhbgI9 /tmp/tmp.NAKi7CraBY + return 0 + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + grep -v NAMESPACE + kubectl get perconaservermongodbs.psmdb.percona.com --all-namespaces -o wide + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbs.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' No resources found + kubectl patch perconaservermongodbs.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbs" + : + kubectl_bin wait --for=delete crd perconaservermongodbs.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.5yRPa1kPBy ++ mktemp + local LAST_ERR=/tmp/tmp.osIlyK6BfL + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbs.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.5yRPa1kPBy + cat /tmp/tmp.osIlyK6BfL + rm /tmp/tmp.5yRPa1kPBy /tmp/tmp.osIlyK6BfL + return 0 + local rbac_yaml=rbac.yaml + '[' -n psmdb-operator ']' + rbac_yaml=cw-rbac.yaml + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2129/deploy/cw-rbac.yaml --ignore-not-found ++ mktemp + local LAST_OUT=/tmp/tmp.goAkTw7XT7 ++ mktemp + local LAST_ERR=/tmp/tmp.DjMM4PCHQB + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2129/deploy/cw-rbac.yaml --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.goAkTw7XT7 clusterrole.rbac.authorization.k8s.io "percona-server-mongodb-operator" deleted clusterrolebinding.rbac.authorization.k8s.io "service-account-percona-server-mongodb-operator" deleted + cat /tmp/tmp.DjMM4PCHQB + rm /tmp/tmp.goAkTw7XT7 /tmp/tmp.DjMM4PCHQB + return 0 + destroy_cert_manager + kubectl_bin delete -f 
https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.OjOkTJQTCn ++ mktemp + local LAST_ERR=/tmp/tmp.iFB9zV1wzu + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.OjOkTJQTCn namespace "cert-manager" deleted customresourcedefinition.apiextensions.k8s.io "challenges.acme.cert-manager.io" deleted customresourcedefinition.apiextensions.k8s.io "orders.acme.cert-manager.io" deleted customresourcedefinition.apiextensions.k8s.io "certificaterequests.cert-manager.io" deleted customresourcedefinition.apiextensions.k8s.io "certificates.cert-manager.io" deleted customresourcedefinition.apiextensions.k8s.io "clusterissuers.cert-manager.io" deleted customresourcedefinition.apiextensions.k8s.io "issuers.cert-manager.io" deleted serviceaccount "cert-manager-cainjector" deleted from cert-manager namespace serviceaccount "cert-manager" deleted from cert-manager namespace serviceaccount "cert-manager-webhook" deleted from cert-manager namespace clusterrole.rbac.authorization.k8s.io "cert-manager-cainjector" deleted clusterrole.rbac.authorization.k8s.io "cert-manager-controller-issuers" deleted clusterrole.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" deleted clusterrole.rbac.authorization.k8s.io "cert-manager-controller-certificates" deleted clusterrole.rbac.authorization.k8s.io "cert-manager-controller-orders" deleted clusterrole.rbac.authorization.k8s.io "cert-manager-controller-challenges" deleted clusterrole.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" deleted clusterrole.rbac.authorization.k8s.io "cert-manager-cluster-view" deleted clusterrole.rbac.authorization.k8s.io "cert-manager-view" deleted clusterrole.rbac.authorization.k8s.io "cert-manager-edit" deleted clusterrole.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" deleted clusterrole.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" deleted clusterrole.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" deleted clusterrolebinding.rbac.authorization.k8s.io "cert-manager-cainjector" deleted clusterrolebinding.rbac.authorization.k8s.io "cert-manager-controller-issuers" deleted clusterrolebinding.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" deleted clusterrolebinding.rbac.authorization.k8s.io "cert-manager-controller-certificates" deleted clusterrolebinding.rbac.authorization.k8s.io "cert-manager-controller-orders" deleted clusterrolebinding.rbac.authorization.k8s.io "cert-manager-controller-challenges" deleted clusterrolebinding.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" deleted clusterrolebinding.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" deleted clusterrolebinding.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" deleted clusterrolebinding.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" deleted role.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" deleted from kube-system namespace role.rbac.authorization.k8s.io "cert-manager:leaderelection" deleted from kube-system namespace rolebinding.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" deleted from kube-system namespace 
rolebinding.rbac.authorization.k8s.io "cert-manager:leaderelection" deleted from kube-system namespace mutatingwebhookconfiguration.admissionregistration.k8s.io "cert-manager-webhook" deleted validatingwebhookconfiguration.admissionregistration.k8s.io "cert-manager-webhook" deleted + cat /tmp/tmp.iFB9zV1wzu Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": services "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": services "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": services "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": deployments.apps "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": deployments.apps "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": deployments.apps "cert-manager-webhook" not found + sleep 0 + for i in $(seq 0 2) + set +e + kubectl delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.OjOkTJQTCn + cat /tmp/tmp.iFB9zV1wzu Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": namespaces "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "challenges.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "orders.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificaterequests.cert-manager.io" not found Error from server (NotFound): error when deleting 
"https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificates.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "clusterissuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "issuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": serviceaccounts "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": serviceaccounts "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": serviceaccounts "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cluster-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-edit" not found Error from server (NotFound): error when deleting 
"https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting 
"https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": services "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": services "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": services "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": deployments.apps "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": deployments.apps "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": deployments.apps "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": mutatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": validatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found + sleep 4 + for i in $(seq 0 2) + set +e + kubectl delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.OjOkTJQTCn + cat /tmp/tmp.iFB9zV1wzu Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": namespaces "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "challenges.acme.cert-manager.io" not found Error from 
server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "orders.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificaterequests.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificates.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "clusterissuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "issuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": serviceaccounts "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": serviceaccounts "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": serviceaccounts "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cluster-view" not found Error from server (NotFound): error 
when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-edit" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting 
"https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": services "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": services "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": services "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": deployments.apps "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": deployments.apps "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": deployments.apps "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": mutatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": validatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found + sleep 8 + cat /tmp/tmp.OjOkTJQTCn + cat /tmp/tmp.iFB9zV1wzu Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": namespaces "cert-manager" not found 
+ rm /tmp/tmp.OjOkTJQTCn /tmp/tmp.iFB9zV1wzu + return 1 + true + '[' -n '' ']' + '[' -n psmdb-operator ']' + kubectl_bin delete --grace-period=0 --force=true namespace monitoring-2-0-3412 + rm -rf /tmp/tmp.DuPpQJQlN6 + kubectl_bin delete --grace-period=0 --force=true namespace psmdb-operator ++ mktemp ++ mktemp + local LAST_OUT=/tmp/tmp.RrxmMqhQy0 + desc 'test passed' + set +o xtrace ++ mktemp ----------------------------------------------------------------------------------- test passed ----------------------------------------------------------------------------------- + local LAST_OUT=/tmp/tmp.uVCkvfH9Ly ++ mktemp + local LAST_ERR=/tmp/tmp.iHAqCdJ8oq + local exit_status=0 + local timeout=4 ++ seq 0 2 + local LAST_ERR=/tmp/tmp.FCxV6yas4h + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete --grace-period=0 --force=true namespace psmdb-operator + for i in $(seq 0 2) + set +e + kubectl delete --grace-period=0 --force=true namespace monitoring-2-0-3412
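Note on the trace above: every kubectl call in this log runs through the suite's kubectl_bin retry wrapper, which is what produces the repeated LAST_OUT/LAST_ERR mktemp files, the exit_status checks, the timeout=4 variable, and the sleep 4 / sleep 8 backoff between attempts. A minimal sketch of that wrapper, reconstructed only from the trace; the exact backoff formula, the extra guard in the '[ ... -a -n 1 ]' test, and the output redirection are assumptions, not the suite's verbatim code:

kubectl_bin() {
	local LAST_OUT LAST_ERR exit_status=0 timeout=4 i
	LAST_OUT=$(mktemp)
	LAST_ERR=$(mktemp)
	for i in $(seq 0 2); do
		set +e
		kubectl "$@" 1>"$LAST_OUT" 2>"$LAST_ERR"
		exit_status=$?
		set -e
		if [ "$exit_status" != 0 ]; then
			# show what failed, then back off before the next attempt
			cat "$LAST_OUT"
			cat "$LAST_ERR"
			sleep $((timeout * (i + 1)))   # assumed formula; the log only shows sleep 4, then sleep 8
		else
			break
		fi
	done
	cat "$LAST_OUT"
	cat "$LAST_ERR"
	rm "$LAST_OUT" "$LAST_ERR"
	return "$exit_status"
}

In the teardown above it is invoked as, for example, kubectl_bin delete --grace-period=0 --force=true namespace psmdb-operator. Because the final cert-manager delete kept failing with NotFound, the wrapper returned 1 and the caller discarded the failure, which is why '+ true' follows '+ return 1' in the trace.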